
Re-basing hadoop-2.3 from branch-2

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2.3@1561697 13f79535-47bb-0310-9956-ffa450edef68
Arun Murthy committed 11 years ago
Commit fe18df20f3
100 files changed, 4164 insertions(+), 1383 deletions(-). Each entry below shows insertions and deletions, followed by the file path.
  1. 3 0
      dev-support/test-patch.sh
  2. 2 2
      hadoop-assemblies/pom.xml
  3. 45 0
      hadoop-assemblies/src/main/resources/assemblies/hadoop-sls.xml
  4. 11 0
      hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
  5. 1 2
      hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
  6. 2 2
      hadoop-client/pom.xml
  7. 2 2
      hadoop-common-project/hadoop-annotations/pom.xml
  8. 2 2
      hadoop-common-project/hadoop-auth-examples/pom.xml
  9. 7 58
      hadoop-common-project/hadoop-auth/pom.xml
  10. 7 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
  11. 7 16
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
  12. 12 14
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
  13. 26 21
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java
  14. 43 17
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
  15. 41 20
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java
  16. 11 6
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java
  17. 64 49
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
  18. 39 33
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java
  19. 45 31
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
  20. 18 11
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java
  21. 15 4
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java
  22. 6 7
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java
  23. 21 15
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java
  24. 316 32
      hadoop-common-project/hadoop-common/CHANGES.txt
  25. 11 5
      hadoop-common-project/hadoop-common/LICENSE.txt
  26. 17 1
      hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  27. 19 6
      hadoop-common-project/hadoop-common/pom.xml
  28. 1 0
      hadoop-common-project/hadoop-common/src/CMakeLists.txt
  29. 1 1
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.cmd
  30. 36 0
      hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
  31. 13 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  32. 118 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java
  33. 66 35
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
  34. 113 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java
  35. 30 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  36. 10 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  37. 37 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
  38. 11 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
  39. 56 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
  40. 2 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
  41. 2 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java
  42. 1 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSLinkResolver.java
  43. 4 151
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
  44. 286 158
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  45. 3 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemLinkResolver.java
  46. 3 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
  47. 270 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
  48. 53 44
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
  49. 5 35
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java
  50. 79 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasEnhancedByteBufferAccess.java
  51. 32 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InvalidRequestException.java
  52. 4 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
  53. 24 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
  54. 25 17
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
  55. 164 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
  56. 34 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ReadOption.java
  57. 3 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Seekable.java
  58. 173 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java
  59. 36 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyUnavailableException.java
  60. 5 89
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java
  61. 10 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
  62. 15 11
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java
  63. 3 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  64. 12 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
  65. 13 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
  66. 94 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java
  67. 62 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLogAppender.java
  68. 403 177
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  69. 48 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java
  70. 118 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
  71. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
  72. 1 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
  73. 6 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableFactories.java
  74. 16 15
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
  75. 11 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
  76. 35 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DirectDecompressionCodec.java
  77. 59 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DirectDecompressor.java
  78. 9 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
  79. 5 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java
  80. 10 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
  81. 17 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java
  82. 72 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
  83. 84 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
  84. 12 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
  85. 165 11
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
  86. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/LossyRetryInvocationHandler.java
  87. 10 12
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
  88. 75 13
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  89. 213 167
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  90. 40 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
  91. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java
  92. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
  93. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java
  94. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java
  95. 13 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
  96. 2 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java
  97. 7 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
  98. 8 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java
  99. 6 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
  100. 7 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java

+ 3 - 0
dev-support/test-patch.sh

@@ -395,6 +395,9 @@ checkJavadocWarnings () {
   echo ""
   echo "There appear to be $javadocWarnings javadoc warnings generated by the patched build."
 
+  #There are 12 warnings caused by the use of sun internal APIs.
+  #There are 2 warnings that are caused by the Apache DS Dn class used in MiniKdc.
+  OK_JAVADOC_WARNINGS=14;
   ### if current warnings greater than OK_JAVADOC_WARNINGS
   if [[ $javadocWarnings -gt $OK_JAVADOC_WARNINGS ]] ; then
     JIRA_COMMENT="$JIRA_COMMENT

+ 2 - 2
hadoop-assemblies/pom.xml

@@ -23,12 +23,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.3.0-SNAPSHOT</version>
+    <version>2.4.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>2.3.0-SNAPSHOT</version>
+  <version>2.4.0-SNAPSHOT</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
 

+ 45 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-sls.xml

@@ -0,0 +1,45 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+  
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<assembly>
+  <id>hadoop-sls</id>
+  <formats>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+
+  <fileSets>
+    <fileSet>
+      <directory>${basedir}/src/main/bin</directory>
+      <outputDirectory>sls/bin</outputDirectory>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/html</directory>
+      <outputDirectory>sls/html</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/sample-conf</directory>
+      <outputDirectory>sls/sample-conf</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/data</directory>
+      <outputDirectory>sls/sample-data</outputDirectory>
+    </fileSet>
+  </fileSets>
+
+</assembly>

+ 11 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml

@@ -93,6 +93,17 @@
         <include>*-sources.jar</include>
       </includes>
     </fileSet>
+    <fileSet>
+      <directory>../hadoop-sls/target</directory>
+      <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
+      <includes>
+        <include>*-sources.jar</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>../hadoop-sls/target/hadoop-sls-${project.version}/sls</directory>
+      <outputDirectory>/share/hadoop/${hadoop.component}/sls</outputDirectory>
+    </fileSet>
   </fileSets>
   <dependencySets>
     <dependencySet>

+ 1 - 2
hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml

@@ -207,8 +207,7 @@
       <outputDirectory>/share/hadoop/${hadoop.component}/lib</outputDirectory>
       <!-- Exclude hadoop artifacts. They will be found via HADOOP* env -->
       <excludes>
-        <exclude>org.apache.hadoop:hadoop-common</exclude>
-        <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
+        <exclude>org.apache.hadoop:*</exclude>
         <!-- use slf4j from common to avoid multiple binding warnings -->
         <exclude>org.slf4j:slf4j-api</exclude>
         <exclude>org.slf4j:slf4j-log4j12</exclude>

+ 2 - 2
hadoop-client/pom.xml

@@ -18,12 +18,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>2.3.0-SNAPSHOT</version>
+    <version>2.4.0-SNAPSHOT</version>
     <relativePath>../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-client</artifactId>
-  <version>2.3.0-SNAPSHOT</version>
+  <version>2.4.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

+ 2 - 2
hadoop-common-project/hadoop-annotations/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.3.0-SNAPSHOT</version>
+    <version>2.4.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-annotations</artifactId>
-  <version>2.3.0-SNAPSHOT</version>
+  <version>2.4.0-SNAPSHOT</version>
   <description>Apache Hadoop Annotations</description>
   <name>Apache Hadoop Annotations</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.3.0-SNAPSHOT</version>
+    <version>2.4.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>2.3.0-SNAPSHOT</version>
+  <version>2.4.0-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop Auth Examples</name>

+ 7 - 58
hadoop-common-project/hadoop-auth/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.3.0-SNAPSHOT</version>
+    <version>2.4.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth</artifactId>
-  <version>2.3.0-SNAPSHOT</version>
+  <version>2.4.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop Auth</name>
@@ -33,7 +33,6 @@
 
   <properties>
     <maven.build.timestamp.format>yyyyMMdd</maven.build.timestamp.format>
-    <kerberos.realm>LOCALHOST</kerberos.realm>
   </properties>
 
   <dependencies>
@@ -93,6 +92,11 @@
       <artifactId>slf4j-log4j12</artifactId>
       <scope>runtime</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.httpcomponents</groupId>
       <artifactId>httpclient</artifactId>
@@ -101,35 +105,7 @@
   </dependencies>
 
   <build>
-    <testResources>
-      <testResource>
-        <directory>${basedir}/src/test/resources</directory>
-        <filtering>true</filtering>
-        <includes>
-          <include>krb5.conf</include>
-        </includes>
-      </testResource>
-    </testResources>
     <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-surefire-plugin</artifactId>
-        <configuration>
-          <forkMode>always</forkMode>
-          <forkedProcessTimeoutInSeconds>600</forkedProcessTimeoutInSeconds>
-          <systemPropertyVariables>
-            <java.security.krb5.conf>${project.build.directory}/test-classes/krb5.conf</java.security.krb5.conf>
-            <kerberos.realm>${kerberos.realm}</kerberos.realm>
-          </systemPropertyVariables>
-          <excludes>
-            <exclude>**/${test.exclude}.java</exclude>
-            <exclude>${test.exclude.pattern}</exclude>
-            <exclude>**/TestKerberosAuth*.java</exclude>
-            <exclude>**/TestAltKerberosAuth*.java</exclude>
-            <exclude>**/Test*$*.java</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-source-plugin</artifactId>
@@ -149,33 +125,6 @@
   </build>
 
   <profiles>
-    <profile>
-      <id>testKerberos</id>
-      <activation>
-        <activeByDefault>false</activeByDefault>
-      </activation>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-surefire-plugin</artifactId>
-            <configuration>
-              <forkMode>always</forkMode>
-              <forkedProcessTimeoutInSeconds>600</forkedProcessTimeoutInSeconds>
-              <systemPropertyVariables>
-                <java.security.krb5.conf>${project.build.directory}/test-classes/krb5.conf</java.security.krb5.conf>
-                <kerberos.realm>${kerberos.realm}</kerberos.realm>
-              </systemPropertyVariables>
-              <excludes>
-                <exclude>**/${test.exclude}.java</exclude>
-                <exclude>${test.exclude.pattern}</exclude>
-                <exclude>**/Test*$*.java</exclude>
-              </excludes>
-            </configuration>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
     <profile>
       <id>docs</id>
       <activation>
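
The hadoop-auth pom changes above drop the filtered krb5.conf test resource, the per-module surefire Kerberos excludes, and the optional testKerberos profile, and instead pull in hadoop-minikdc as a test dependency so the Kerberos tests run against an embedded KDC by default. The following is a minimal sketch, not part of this patch, of driving that embedded KDC directly; the work directory and principal names are illustrative.

import java.io.File;
import java.util.Properties;

import org.apache.hadoop.minikdc.MiniKdc;

public class MiniKdcSketch {
  public static void main(String[] args) throws Exception {
    Properties conf = MiniKdc.createConf();            // default MiniKdc settings
    File workDir = new File("target", "minikdc-work"); // hypothetical scratch directory
    workDir.mkdirs();

    MiniKdc kdc = new MiniKdc(conf, workDir);
    kdc.start();                                       // boots the ApacheDS-backed KDC
    try {
      File keytab = new File(workDir, "test.keytab");
      // Create a client and an HTTP service principal in a single keytab,
      // mirroring what the reworked tests do in their @Before methods.
      kdc.createPrincipal(keytab, "client", "HTTP/localhost");
      System.out.println("KDC realm: " + kdc.getRealm());
    } finally {
      kdc.stop();
    }
  }
}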

+ 7 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java

@@ -196,7 +196,13 @@ public class KerberosAuthenticator implements Authenticator {
         doSpnegoSequence(token);
       } else {
         LOG.debug("Using fallback authenticator sequence.");
-        getFallBackAuthenticator().authenticate(url, token);
+        Authenticator auth = getFallBackAuthenticator();
+        // Make sure that the fall back authenticator has the same
+        // ConnectionConfigurator, since the method might be overridden.
+        // Otherwise the fall back authenticator might not have the information
+        // to make the connection (e.g., SSL certificates)
+        auth.setConnectionConfigurator(connConfigurator);
+        auth.authenticate(url, token);
       }
     }
   }
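
The hunk above fixes the fallback path so the fallback authenticator reuses the same ConnectionConfigurator as the Kerberos attempt; without it, connection setup such as SSL trust or timeouts would be lost when SPNEGO is not available. Below is a minimal sketch, not taken from this patch, of the call path this protects; the URL and the timeout-only configurator are illustrative stand-ins (an SSLFactory-backed configurator is the realistic case).

import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;

public class FallbackConfiguratorSketch {
  public static void main(String[] args) throws Exception {
    ConnectionConfigurator connConf = new ConnectionConfigurator() {
      @Override
      public HttpURLConnection configure(HttpURLConnection conn) {
        conn.setConnectTimeout(30000);  // stand-in for real SSL/timeout setup
        conn.setReadTimeout(30000);
        return conn;
      }
    };

    // AuthenticatedURL hands connConf to the authenticator; with the fix above,
    // a fallback to PseudoAuthenticator now inherits the same configurator.
    AuthenticatedURL aUrl = new AuthenticatedURL(new KerberosAuthenticator(), connConf);
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    HttpURLConnection conn =
        aUrl.openConnection(new URL("http://localhost:8080/protected"), token);
    System.out.println("HTTP " + conn.getResponseCode());
  }
}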

+ 7 - 16
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java

@@ -13,7 +13,6 @@
  */
 package org.apache.hadoop.security.authentication;
 
-
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.auth.login.AppConfigurationEntry;
@@ -26,6 +25,7 @@ import java.io.File;
 import java.security.Principal;
 import java.security.PrivilegedActionException;
 import java.security.PrivilegedExceptionAction;
+import java.util.UUID;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -36,32 +36,23 @@ import java.util.concurrent.Callable;
  * Test helper class for Java Kerberos setup.
  */
 public class KerberosTestUtils {
-  private static final String PREFIX = "hadoop-auth.test.";
-
-  public static final String REALM = PREFIX + "kerberos.realm";
-
-  public static final String CLIENT_PRINCIPAL = PREFIX + "kerberos.client.principal";
-
-  public static final String SERVER_PRINCIPAL = PREFIX + "kerberos.server.principal";
-
-  public static final String KEYTAB_FILE = PREFIX + "kerberos.keytab.file";
+  private static String keytabFile = new File(System.getProperty("test.dir", "target"),
+          UUID.randomUUID().toString()).toString();
 
   public static String getRealm() {
-    return System.getProperty(REALM, "LOCALHOST");
+    return "EXAMPLE.COM";
   }
 
   public static String getClientPrincipal() {
-    return System.getProperty(CLIENT_PRINCIPAL, "client") + "@" + getRealm();
+    return "client@EXAMPLE.COM";
   }
 
   public static String getServerPrincipal() {
-    return System.getProperty(SERVER_PRINCIPAL, "HTTP/localhost") + "@" + getRealm();
+    return "HTTP/localhost@EXAMPLE.COM";
   }
 
   public static String getKeytabFile() {
-    String keytabFile =
-      new File(System.getProperty("user.home"), System.getProperty("user.name") + ".keytab").toString();
-    return System.getProperty(KEYTAB_FILE, keytabFile);
+    return keytabFile;
   }
 
   private static class KerberosConfiguration extends Configuration {

+ 12 - 14
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java

@@ -2,9 +2,9 @@
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -13,10 +13,7 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
-import junit.framework.Assert;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import junit.framework.TestCase;
-import org.mockito.Mockito;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.servlet.Context;
 import org.mortbay.jetty.servlet.FilterHolder;
@@ -27,19 +24,20 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
-import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.OutputStreamWriter;
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
 import java.io.Writer;
 import java.net.HttpURLConnection;
 import java.net.ServerSocket;
 import java.net.URL;
 import java.util.Properties;
+import org.junit.Assert;
 
-public abstract class AuthenticatorTestCase extends TestCase {
+public class AuthenticatorTestCase {
   private Server server;
   private String host = null;
   private int port = -1;
@@ -150,18 +148,18 @@ public abstract class AuthenticatorTestCase extends TestCase {
         writer.write(POST);
         writer.close();
       }
-      assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
       if (doPost) {
         BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
         String echo = reader.readLine();
-        assertEquals(POST, echo);
-        assertNull(reader.readLine());
+        Assert.assertEquals(POST, echo);
+        Assert.assertNull(reader.readLine());
       }
       aUrl = new AuthenticatedURL();
       conn = aUrl.openConnection(url, token);
       conn.connect();
-      assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
-      assertEquals(tokenStr, token.toString());
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+      Assert.assertEquals(tokenStr, token.toString());
     } finally {
       stop();
     }
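
This file, and the test classes that follow, are converted from JUnit 3 to JUnit 4 in the same way: the junit.framework.TestCase base class is dropped, each test method gains an @Test annotation, and the inherited assert methods become explicit org.junit.Assert calls, with AuthenticatorTestCase now used by composition rather than inheritance. A generic before/after sketch of the pattern, not taken from this patch:

import org.junit.Assert;
import org.junit.Test;

public class MigrationSketch {

  // JUnit 3 style, roughly what the old tests looked like:
  //   public class FooTest extends junit.framework.TestCase {
  //     public void testBar() { assertEquals("expected", actual()); }
  //   }

  // JUnit 4 style, what this commit converts them to:
  @Test
  public void testBar() {
    Assert.assertEquals("expected", actual());
  }

  private String actual() {
    return "expected";  // trivial stand-in so the sketch compiles
  }
}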

+ 26 - 21
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java

@@ -13,8 +13,8 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
-import junit.framework.Assert;
-import junit.framework.TestCase;
+import org.junit.Assert;
+import org.junit.Test;
 import org.mockito.Mockito;
 
 import java.net.HttpURLConnection;
@@ -24,46 +24,48 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-public class TestAuthenticatedURL extends TestCase {
+public class TestAuthenticatedURL {
 
+  @Test
   public void testToken() throws Exception {
     AuthenticatedURL.Token token = new AuthenticatedURL.Token();
-    assertFalse(token.isSet());
+    Assert.assertFalse(token.isSet());
     token = new AuthenticatedURL.Token("foo");
-    assertTrue(token.isSet());
-    assertEquals("foo", token.toString());
+    Assert.assertTrue(token.isSet());
+    Assert.assertEquals("foo", token.toString());
 
     AuthenticatedURL.Token token1 = new AuthenticatedURL.Token();
     AuthenticatedURL.Token token2 = new AuthenticatedURL.Token();
-    assertEquals(token1.hashCode(), token2.hashCode());
-    assertTrue(token1.equals(token2));
+    Assert.assertEquals(token1.hashCode(), token2.hashCode());
+    Assert.assertTrue(token1.equals(token2));
 
     token1 = new AuthenticatedURL.Token();
     token2 = new AuthenticatedURL.Token("foo");
-    assertNotSame(token1.hashCode(), token2.hashCode());
-    assertFalse(token1.equals(token2));
+    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
+    Assert.assertFalse(token1.equals(token2));
 
     token1 = new AuthenticatedURL.Token("foo");
     token2 = new AuthenticatedURL.Token();
-    assertNotSame(token1.hashCode(), token2.hashCode());
-    assertFalse(token1.equals(token2));
+    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
+    Assert.assertFalse(token1.equals(token2));
 
     token1 = new AuthenticatedURL.Token("foo");
     token2 = new AuthenticatedURL.Token("foo");
-    assertEquals(token1.hashCode(), token2.hashCode());
-    assertTrue(token1.equals(token2));
+    Assert.assertEquals(token1.hashCode(), token2.hashCode());
+    Assert.assertTrue(token1.equals(token2));
 
     token1 = new AuthenticatedURL.Token("bar");
     token2 = new AuthenticatedURL.Token("foo");
-    assertNotSame(token1.hashCode(), token2.hashCode());
-    assertFalse(token1.equals(token2));
+    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
+    Assert.assertFalse(token1.equals(token2));
 
     token1 = new AuthenticatedURL.Token("foo");
     token2 = new AuthenticatedURL.Token("bar");
-    assertNotSame(token1.hashCode(), token2.hashCode());
-    assertFalse(token1.equals(token2));
+    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
+    Assert.assertFalse(token1.equals(token2));
   }
 
+  @Test
   public void testInjectToken() throws Exception {
     HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
     AuthenticatedURL.Token token = new AuthenticatedURL.Token();
@@ -72,6 +74,7 @@ public class TestAuthenticatedURL extends TestCase {
     Mockito.verify(conn).addRequestProperty(Mockito.eq("Cookie"), Mockito.anyString());
   }
 
+  @Test
   public void testExtractTokenOK() throws Exception {
     HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
 
@@ -87,9 +90,10 @@ public class TestAuthenticatedURL extends TestCase {
     AuthenticatedURL.Token token = new AuthenticatedURL.Token();
     AuthenticatedURL.extractToken(conn, token);
 
-    assertEquals(tokenStr, token.toString());
+    Assert.assertEquals(tokenStr, token.toString());
   }
 
+  @Test
   public void testExtractTokenFail() throws Exception {
     HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
 
@@ -106,15 +110,16 @@ public class TestAuthenticatedURL extends TestCase {
     token.set("bar");
     try {
       AuthenticatedURL.extractToken(conn, token);
-      fail();
+      Assert.fail();
     } catch (AuthenticationException ex) {
       // Expected
       Assert.assertFalse(token.isSet());
     } catch (Exception ex) {
-      fail();
+      Assert.fail();
     }
   }
 
+  @Test
   public void testConnectionConfigurator() throws Exception {
     HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
     Mockito.when(conn.getResponseCode()).

+ 43 - 17
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java

@@ -13,17 +13,33 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
+import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
 import org.apache.hadoop.security.authentication.KerberosTestUtils;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
 
+import java.io.File;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.util.Properties;
 import java.util.concurrent.Callable;
 
-public class TestKerberosAuthenticator extends AuthenticatorTestCase {
+public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
+
+  @Before
+  public void setup() throws Exception {
+    // create keytab
+    File keytabFile = new File(KerberosTestUtils.getKeytabFile());
+    String clientPrincipal = KerberosTestUtils.getClientPrincipal();
+    String serverPrincipal = KerberosTestUtils.getServerPrincipal();
+    clientPrincipal = clientPrincipal.substring(0, clientPrincipal.lastIndexOf("@"));
+    serverPrincipal = serverPrincipal.substring(0, serverPrincipal.lastIndexOf("@"));
+    getKdc().createPrincipal(keytabFile, clientPrincipal, serverPrincipal);
+  }
 
   private Properties getAuthenticationHandlerConfiguration() {
     Properties props = new Properties();
@@ -35,57 +51,67 @@ public class TestKerberosAuthenticator extends AuthenticatorTestCase {
     return props;
   }
 
+  @Test(timeout=60000)
   public void testFallbacktoPseudoAuthenticator() throws Exception {
+    AuthenticatorTestCase auth = new AuthenticatorTestCase();
     Properties props = new Properties();
     props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple");
     props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false");
-    setAuthenticationHandlerConfig(props);
-    _testAuthentication(new KerberosAuthenticator(), false);
+    auth.setAuthenticationHandlerConfig(props);
+    auth._testAuthentication(new KerberosAuthenticator(), false);
   }
 
+  @Test(timeout=60000)
   public void testFallbacktoPseudoAuthenticatorAnonymous() throws Exception {
+    AuthenticatorTestCase auth = new AuthenticatorTestCase();
     Properties props = new Properties();
     props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple");
     props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
-    setAuthenticationHandlerConfig(props);
-    _testAuthentication(new KerberosAuthenticator(), false);
+    auth.setAuthenticationHandlerConfig(props);
+    auth._testAuthentication(new KerberosAuthenticator(), false);
   }
 
+  @Test(timeout=60000)
   public void testNotAuthenticated() throws Exception {
-    setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration());
-    start();
+    AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    auth.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration());
+    auth.start();
     try {
-      URL url = new URL(getBaseURL());
+      URL url = new URL(auth.getBaseURL());
       HttpURLConnection conn = (HttpURLConnection) url.openConnection();
       conn.connect();
-      assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
-      assertTrue(conn.getHeaderField(KerberosAuthenticator.WWW_AUTHENTICATE) != null);
+      Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
+      Assert.assertTrue(conn.getHeaderField(KerberosAuthenticator.WWW_AUTHENTICATE) != null);
     } finally {
-      stop();
+      auth.stop();
     }
   }
 
-
+  @Test(timeout=60000)
   public void testAuthentication() throws Exception {
-    setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration());
+    final AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    auth.setAuthenticationHandlerConfig(
+            getAuthenticationHandlerConfiguration());
     KerberosTestUtils.doAsClient(new Callable<Void>() {
       @Override
       public Void call() throws Exception {
-        _testAuthentication(new KerberosAuthenticator(), false);
+        auth._testAuthentication(new KerberosAuthenticator(), false);
         return null;
       }
     });
   }
 
+  @Test(timeout=60000)
   public void testAuthenticationPost() throws Exception {
-    setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration());
+    final AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    auth.setAuthenticationHandlerConfig(
+            getAuthenticationHandlerConfiguration());
     KerberosTestUtils.doAsClient(new Callable<Void>() {
       @Override
       public Void call() throws Exception {
-        _testAuthentication(new KerberosAuthenticator(), true);
+        auth._testAuthentication(new KerberosAuthenticator(), true);
         return null;
       }
     });
   }
-
 }

+ 41 - 20
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java

@@ -15,12 +15,14 @@ package org.apache.hadoop.security.authentication.client;
 
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+import org.junit.Assert;
+import org.junit.Test;
 
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.util.Properties;
 
-public class TestPseudoAuthenticator extends AuthenticatorTestCase {
+public class TestPseudoAuthenticator {
 
   private Properties getAuthenticationHandlerConfiguration(boolean anonymousAllowed) {
     Properties props = new Properties();
@@ -29,55 +31,74 @@ public class TestPseudoAuthenticator extends AuthenticatorTestCase {
     return props;
   }
 
+  @Test
   public void testGetUserName() throws Exception {
     PseudoAuthenticator authenticator = new PseudoAuthenticator();
-    assertEquals(System.getProperty("user.name"), authenticator.getUserName());
+    Assert.assertEquals(System.getProperty("user.name"), authenticator.getUserName());
   }
 
+  @Test
   public void testAnonymousAllowed() throws Exception {
-    setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(true));
-    start();
+    AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    auth.setAuthenticationHandlerConfig(
+            getAuthenticationHandlerConfiguration(true));
+    auth.start();
     try {
-      URL url = new URL(getBaseURL());
+      URL url = new URL(auth.getBaseURL());
       HttpURLConnection conn = (HttpURLConnection) url.openConnection();
       conn.connect();
-      assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
     } finally {
-      stop();
+      auth.stop();
     }
   }
 
+  @Test
   public void testAnonymousDisallowed() throws Exception {
-    setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(false));
-    start();
+    AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    auth.setAuthenticationHandlerConfig(
+            getAuthenticationHandlerConfiguration(false));
+    auth.start();
     try {
-      URL url = new URL(getBaseURL());
+      URL url = new URL(auth.getBaseURL());
       HttpURLConnection conn = (HttpURLConnection) url.openConnection();
       conn.connect();
-      assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
+      Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
     } finally {
-      stop();
+      auth.stop();
     }
   }
 
+  @Test
   public void testAuthenticationAnonymousAllowed() throws Exception {
-    setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(true));
-    _testAuthentication(new PseudoAuthenticator(), false);
+    AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    auth.setAuthenticationHandlerConfig(
+            getAuthenticationHandlerConfiguration(true));
+    auth._testAuthentication(new PseudoAuthenticator(), false);
   }
 
+  @Test
   public void testAuthenticationAnonymousDisallowed() throws Exception {
-    setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(false));
-    _testAuthentication(new PseudoAuthenticator(), false);
+    AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    auth.setAuthenticationHandlerConfig(
+            getAuthenticationHandlerConfiguration(false));
+    auth._testAuthentication(new PseudoAuthenticator(), false);
   }
 
+  @Test
   public void testAuthenticationAnonymousAllowedWithPost() throws Exception {
-    setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(true));
-    _testAuthentication(new PseudoAuthenticator(), true);
+    AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    auth.setAuthenticationHandlerConfig(
+            getAuthenticationHandlerConfiguration(true));
+    auth._testAuthentication(new PseudoAuthenticator(), true);
   }
 
+  @Test
   public void testAuthenticationAnonymousDisallowedWithPost() throws Exception {
-    setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(false));
-    _testAuthentication(new PseudoAuthenticator(), true);
+    AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    auth.setAuthenticationHandlerConfig(
+            getAuthenticationHandlerConfiguration(false));
+    auth._testAuthentication(new PseudoAuthenticator(), true);
   }
 
 }

+ 11 - 6
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java

@@ -18,6 +18,8 @@ import java.util.Properties;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.junit.Assert;
+import org.junit.Test;
 import org.mockito.Mockito;
 
 public class TestAltKerberosAuthenticationHandler
@@ -45,6 +47,7 @@ public class TestAltKerberosAuthenticationHandler
     return AltKerberosAuthenticationHandler.TYPE;
   }
 
+  @Test(timeout=60000)
   public void testAlternateAuthenticationAsBrowser() throws Exception {
     HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
     HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
@@ -54,11 +57,12 @@ public class TestAltKerberosAuthenticationHandler
     Mockito.when(request.getHeader("User-Agent")).thenReturn("Some Browser");
 
     AuthenticationToken token = handler.authenticate(request, response);
-    assertEquals("A", token.getUserName());
-    assertEquals("B", token.getName());
-    assertEquals(getExpectedType(), token.getType());
+    Assert.assertEquals("A", token.getUserName());
+    Assert.assertEquals("B", token.getName());
+    Assert.assertEquals(getExpectedType(), token.getType());
   }
 
+  @Test(timeout=60000)
   public void testNonDefaultNonBrowserUserAgentAsBrowser() throws Exception {
     HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
     HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
@@ -81,11 +85,12 @@ public class TestAltKerberosAuthenticationHandler
     Mockito.when(request.getHeader("User-Agent")).thenReturn("blah");
     // Should use alt authentication
     AuthenticationToken token = handler.authenticate(request, response);
-    assertEquals("A", token.getUserName());
-    assertEquals("B", token.getName());
-    assertEquals(getExpectedType(), token.getType());
+    Assert.assertEquals("A", token.getUserName());
+    Assert.assertEquals("B", token.getName());
+    Assert.assertEquals(getExpectedType(), token.getType());
   }
 
+  @Test(timeout=60000)
   public void testNonDefaultNonBrowserUserAgentAsNonBrowser() throws Exception {
     if (handler != null) {
       handler.destroy();

+ 64 - 49
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java

@@ -16,7 +16,8 @@ package org.apache.hadoop.security.authentication.server;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.util.Signer;
-import junit.framework.TestCase;
+import org.junit.Assert;
+import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -34,8 +35,9 @@ import java.util.Arrays;
 import java.util.Properties;
 import java.util.Vector;
 
-public class TestAuthenticationFilter extends TestCase {
+public class TestAuthenticationFilter {
 
+  @Test
   public void testGetConfiguration() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     FilterConfig config = Mockito.mock(FilterConfig.class);
@@ -43,27 +45,28 @@ public class TestAuthenticationFilter extends TestCase {
     Mockito.when(config.getInitParameter("a")).thenReturn("A");
     Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>(Arrays.asList("a")).elements());
     Properties props = filter.getConfiguration("", config);
-    assertEquals("A", props.getProperty("a"));
+    Assert.assertEquals("A", props.getProperty("a"));
 
     config = Mockito.mock(FilterConfig.class);
     Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("foo");
     Mockito.when(config.getInitParameter("foo.a")).thenReturn("A");
     Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>(Arrays.asList("foo.a")).elements());
     props = filter.getConfiguration("foo.", config);
-    assertEquals("A", props.getProperty("a"));
+    Assert.assertEquals("A", props.getProperty("a"));
   }
 
+  @Test
   public void testInitEmpty() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
       Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>().elements());
       filter.init(config);
-      fail();
+      Assert.fail();
     } catch (ServletException ex) {
       // Expected
     } catch (Exception ex) {
-      fail();
+      Assert.fail();
     } finally {
       filter.destroy();
     }
@@ -126,6 +129,7 @@ public class TestAuthenticationFilter extends TestCase {
     }
   }
 
+  @Test
   public void testInit() throws Exception {
 
     // minimal configuration & simple auth handler (Pseudo)
@@ -138,11 +142,11 @@ public class TestAuthenticationFilter extends TestCase {
         new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                  AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
       filter.init(config);
-      assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
-      assertTrue(filter.isRandomSecret());
-      assertNull(filter.getCookieDomain());
-      assertNull(filter.getCookiePath());
-      assertEquals(1000, filter.getValidity());
+      Assert.assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
+      Assert.assertTrue(filter.isRandomSecret());
+      Assert.assertNull(filter.getCookieDomain());
+      Assert.assertNull(filter.getCookiePath());
+      Assert.assertEquals(1000, filter.getValidity());
     } finally {
       filter.destroy();
     }
@@ -157,7 +161,7 @@ public class TestAuthenticationFilter extends TestCase {
         new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                  AuthenticationFilter.SIGNATURE_SECRET)).elements());
       filter.init(config);
-      assertFalse(filter.isRandomSecret());
+      Assert.assertFalse(filter.isRandomSecret());
     } finally {
       filter.destroy();
     }
@@ -174,13 +178,12 @@ public class TestAuthenticationFilter extends TestCase {
                                  AuthenticationFilter.COOKIE_DOMAIN,
                                  AuthenticationFilter.COOKIE_PATH)).elements());
       filter.init(config);
-      assertEquals(".foo.com", filter.getCookieDomain());
-      assertEquals("/bar", filter.getCookiePath());
+      Assert.assertEquals(".foo.com", filter.getCookieDomain());
+      Assert.assertEquals("/bar", filter.getCookiePath());
     } finally {
       filter.destroy();
     }
 
-
     // authentication handler lifecycle, and custom impl
     DummyAuthenticationHandler.reset();
     filter = new AuthenticationFilter();
@@ -195,10 +198,10 @@ public class TestAuthenticationFilter extends TestCase {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
       filter.init(config);
-      assertTrue(DummyAuthenticationHandler.init);
+      Assert.assertTrue(DummyAuthenticationHandler.init);
     } finally {
       filter.destroy();
-      assertTrue(DummyAuthenticationHandler.destroy);
+      Assert.assertTrue(DummyAuthenticationHandler.destroy);
     }
 
     // kerberos auth handler
@@ -212,11 +215,12 @@ public class TestAuthenticationFilter extends TestCase {
     } catch (ServletException ex) {
       // Expected
     } finally {
-      assertEquals(KerberosAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
+      Assert.assertEquals(KerberosAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
       filter.destroy();
     }
   }
 
+  @Test
   public void testGetRequestURL() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
@@ -235,12 +239,13 @@ public class TestAuthenticationFilter extends TestCase {
       Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar"));
       Mockito.when(request.getQueryString()).thenReturn("a=A&b=B");
 
-      assertEquals("http://foo:8080/bar?a=A&b=B", filter.getRequestURL(request));
+      Assert.assertEquals("http://foo:8080/bar?a=A&b=B", filter.getRequestURL(request));
     } finally {
       filter.destroy();
     }
   }
 
+  @Test
   public void testGetToken() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
@@ -268,12 +273,13 @@ public class TestAuthenticationFilter extends TestCase {
 
       AuthenticationToken newToken = filter.getToken(request);
 
-      assertEquals(token.toString(), newToken.toString());
+      Assert.assertEquals(token.toString(), newToken.toString());
     } finally {
       filter.destroy();
     }
   }
 
+  @Test
   public void testGetTokenExpired() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
@@ -300,17 +306,18 @@ public class TestAuthenticationFilter extends TestCase {
 
       try {
         filter.getToken(request);
-        fail();
+        Assert.fail();
       } catch (AuthenticationException ex) {
         // Expected
       } catch (Exception ex) {
-        fail();
+        Assert.fail();
       }
     } finally {
       filter.destroy();
     }
   }
 
+  @Test
   public void testGetTokenInvalidType() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
@@ -338,17 +345,18 @@ public class TestAuthenticationFilter extends TestCase {
 
       try {
         filter.getToken(request);
-        fail();
+        Assert.fail();
       } catch (AuthenticationException ex) {
         // Expected
       } catch (Exception ex) {
-        fail();
+        Assert.fail();
       }
     } finally {
       filter.destroy();
     }
   }
 
+  @Test
   public void testDoFilterNotAuthenticated() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
@@ -374,7 +382,7 @@ public class TestAuthenticationFilter extends TestCase {
         new Answer<Object>() {
           @Override
           public Object answer(InvocationOnMock invocation) throws Throwable {
-            fail();
+            Assert.fail();
             return null;
           }
         }
@@ -468,27 +476,27 @@ public class TestAuthenticationFilter extends TestCase {
         Mockito.verify(response, Mockito.never()).
           addCookie(Mockito.any(Cookie.class));
       } else {
-        assertNotNull(setCookie[0]);
-        assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName());
-        assertTrue(setCookie[0].getValue().contains("u="));
-        assertTrue(setCookie[0].getValue().contains("p="));
-        assertTrue(setCookie[0].getValue().contains("t="));
-        assertTrue(setCookie[0].getValue().contains("e="));
-        assertTrue(setCookie[0].getValue().contains("s="));
-        assertTrue(calledDoFilter[0]);
+        Assert.assertNotNull(setCookie[0]);
+        Assert.assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName());
+        Assert.assertTrue(setCookie[0].getValue().contains("u="));
+        Assert.assertTrue(setCookie[0].getValue().contains("p="));
+        Assert.assertTrue(setCookie[0].getValue().contains("t="));
+        Assert.assertTrue(setCookie[0].getValue().contains("e="));
+        Assert.assertTrue(setCookie[0].getValue().contains("s="));
+        Assert.assertTrue(calledDoFilter[0]);
 
         Signer signer = new Signer("secret".getBytes());
         String value = signer.verifyAndExtract(setCookie[0].getValue());
         AuthenticationToken token = AuthenticationToken.parse(value);
-        assertEquals(System.currentTimeMillis() + 1000 * 1000,
+        Assert.assertEquals(System.currentTimeMillis() + 1000 * 1000,
                      token.getExpires(), 100);
 
         if (withDomainPath) {
-          assertEquals(".foo.com", setCookie[0].getDomain());
-          assertEquals("/bar", setCookie[0].getPath());
+          Assert.assertEquals(".foo.com", setCookie[0].getDomain());
+          Assert.assertEquals("/bar", setCookie[0].getPath());
         } else {
-          assertNull(setCookie[0].getDomain());
-          assertNull(setCookie[0].getPath());
+          Assert.assertNull(setCookie[0].getDomain());
+          Assert.assertNull(setCookie[0].getPath());
         }
       }
     } finally {
@@ -496,22 +504,27 @@ public class TestAuthenticationFilter extends TestCase {
     }
   }
 
+  @Test
   public void testDoFilterAuthentication() throws Exception {
     _testDoFilterAuthentication(false, false, false);
   }
 
+  @Test
   public void testDoFilterAuthenticationImmediateExpiration() throws Exception {
     _testDoFilterAuthentication(false, false, true);
   }
 
+  @Test
   public void testDoFilterAuthenticationWithInvalidToken() throws Exception {
     _testDoFilterAuthentication(false, true, false);
   }
 
+  @Test
   public void testDoFilterAuthenticationWithDomainPath() throws Exception {
     _testDoFilterAuthentication(true, false, false);
   }
 
+  @Test
   public void testDoFilterAuthenticated() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
@@ -547,8 +560,8 @@ public class TestAuthenticationFilter extends TestCase {
           public Object answer(InvocationOnMock invocation) throws Throwable {
             Object[] args = invocation.getArguments();
             HttpServletRequest request = (HttpServletRequest) args[0];
-            assertEquals("u", request.getRemoteUser());
-            assertEquals("p", request.getUserPrincipal().getName());
+            Assert.assertEquals("u", request.getRemoteUser());
+            Assert.assertEquals("p", request.getUserPrincipal().getName());
             return null;
           }
         }
@@ -561,6 +574,7 @@ public class TestAuthenticationFilter extends TestCase {
     }
   }
 
+  @Test
   public void testDoFilterAuthenticatedExpired() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
@@ -594,7 +608,7 @@ public class TestAuthenticationFilter extends TestCase {
         new Answer<Object>() {
           @Override
           public Object answer(InvocationOnMock invocation) throws Throwable {
-            fail();
+            Assert.fail();
             return null;
           }
         }
@@ -616,15 +630,15 @@ public class TestAuthenticationFilter extends TestCase {
 
       Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString());
 
-      assertNotNull(setCookie[0]);
-      assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName());
-      assertEquals("", setCookie[0].getValue());
+      Assert.assertNotNull(setCookie[0]);
+      Assert.assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName());
+      Assert.assertEquals("", setCookie[0].getValue());
     } finally {
       filter.destroy();
     }
   }
 
-
+  @Test
   public void testDoFilterAuthenticatedInvalidType() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
@@ -658,7 +672,7 @@ public class TestAuthenticationFilter extends TestCase {
         new Answer<Object>() {
           @Override
           public Object answer(InvocationOnMock invocation) throws Throwable {
-            fail();
+            Assert.fail();
             return null;
           }
         }
@@ -680,14 +694,15 @@ public class TestAuthenticationFilter extends TestCase {
 
       Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString());
 
-      assertNotNull(setCookie[0]);
-      assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName());
-      assertEquals("", setCookie[0].getValue());
+      Assert.assertNotNull(setCookie[0]);
+      Assert.assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName());
+      Assert.assertEquals("", setCookie[0].getValue());
     } finally {
       filter.destroy();
     }
   }
 
+  @Test
   public void testManagementOperation() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {

+ 39 - 33
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java

@@ -14,98 +14,104 @@
 package org.apache.hadoop.security.authentication.server;
 
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import junit.framework.TestCase;
+import org.junit.Assert;
+import org.junit.Test;
 
-public class TestAuthenticationToken extends TestCase {
+public class TestAuthenticationToken {
 
+  @Test
   public void testAnonymous() {
-    assertNotNull(AuthenticationToken.ANONYMOUS);
-    assertEquals(null, AuthenticationToken.ANONYMOUS.getUserName());
-    assertEquals(null, AuthenticationToken.ANONYMOUS.getName());
-    assertEquals(null, AuthenticationToken.ANONYMOUS.getType());
-    assertEquals(-1, AuthenticationToken.ANONYMOUS.getExpires());
-    assertFalse(AuthenticationToken.ANONYMOUS.isExpired());
+    Assert.assertNotNull(AuthenticationToken.ANONYMOUS);
+    Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getUserName());
+    Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getName());
+    Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getType());
+    Assert.assertEquals(-1, AuthenticationToken.ANONYMOUS.getExpires());
+    Assert.assertFalse(AuthenticationToken.ANONYMOUS.isExpired());
   }
 
+  @Test
   public void testConstructor() throws Exception {
     try {
       new AuthenticationToken(null, "p", "t");
-      fail();
+      Assert.fail();
     } catch (IllegalArgumentException ex) {
       // Expected
     } catch (Throwable ex) {
-      fail();
+      Assert.fail();
     }
     try {
       new AuthenticationToken("", "p", "t");
-      fail();
+      Assert.fail();
     } catch (IllegalArgumentException ex) {
       // Expected
     } catch (Throwable ex) {
-      fail();
+      Assert.fail();
     }
     try {
       new AuthenticationToken("u", null, "t");
-      fail();
+      Assert.fail();
     } catch (IllegalArgumentException ex) {
       // Expected
     } catch (Throwable ex) {
-      fail();
+      Assert.fail();
     }
     try {
       new AuthenticationToken("u", "", "t");
-      fail();
+      Assert.fail();
     } catch (IllegalArgumentException ex) {
       // Expected
     } catch (Throwable ex) {
-      fail();
+      Assert.fail();
     }
     try {
       new AuthenticationToken("u", "p", null);
-      fail();
+      Assert.fail();
     } catch (IllegalArgumentException ex) {
       // Expected
     } catch (Throwable ex) {
-      fail();
+      Assert.fail();
     }
     try {
       new AuthenticationToken("u", "p", "");
-      fail();
+      Assert.fail();
     } catch (IllegalArgumentException ex) {
       // Expected
     } catch (Throwable ex) {
-      fail();
+      Assert.fail();
     }
     new AuthenticationToken("u", "p", "t");
   }
 
+  @Test
   public void testGetters() throws Exception {
     long expires = System.currentTimeMillis() + 50;
     AuthenticationToken token = new AuthenticationToken("u", "p", "t");
     token.setExpires(expires);
-    assertEquals("u", token.getUserName());
-    assertEquals("p", token.getName());
-    assertEquals("t", token.getType());
-    assertEquals(expires, token.getExpires());
-    assertFalse(token.isExpired());
+    Assert.assertEquals("u", token.getUserName());
+    Assert.assertEquals("p", token.getName());
+    Assert.assertEquals("t", token.getType());
+    Assert.assertEquals(expires, token.getExpires());
+    Assert.assertFalse(token.isExpired());
     Thread.sleep(51);
-    assertTrue(token.isExpired());
+    Assert.assertTrue(token.isExpired());
   }
 
+  @Test
   public void testToStringAndParse() throws Exception {
     long expires = System.currentTimeMillis() + 50;
     AuthenticationToken token = new AuthenticationToken("u", "p", "t");
     token.setExpires(expires);
     String str = token.toString();
     token = AuthenticationToken.parse(str);
-    assertEquals("p", token.getName());
-    assertEquals("t", token.getType());
-    assertEquals(expires, token.getExpires());
-    assertFalse(token.isExpired());
+    Assert.assertEquals("p", token.getName());
+    Assert.assertEquals("t", token.getType());
+    Assert.assertEquals(expires, token.getExpires());
+    Assert.assertFalse(token.isExpired());
     Thread.sleep(51);
-    assertTrue(token.isExpired());
+    Assert.assertTrue(token.isExpired());
   }
 
+  @Test
   public void testParseInvalid() throws Exception {
     long expires = System.currentTimeMillis() + 50;
     AuthenticationToken token = new AuthenticationToken("u", "p", "t");
@@ -114,11 +120,11 @@ public class TestAuthenticationToken extends TestCase {
     str = str.substring(0, str.indexOf("e="));
     try {
       AuthenticationToken.parse(str);
-      fail();
+      Assert.fail();
     } catch (AuthenticationException ex) {
       // Expected
     } catch (Exception ex) {
-      fail();
+      Assert.fail();
     }
   }
 }
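
The conversion above is the pattern applied throughout the hadoop-auth tests: drop the junit.framework.TestCase base class, mark each test method with @Test, and call the static org.junit.Assert methods explicitly. A minimal sketch of the converted shape (class and method names here are illustrative, not part of the patch):

    import org.junit.Assert;
    import org.junit.Test;

    // Formerly: public class ExampleMigratedTest extends TestCase
    public class ExampleMigratedTest {

      @Test
      public void testExpectedFailure() {
        try {
          Integer.parseInt("not a number");
          Assert.fail();                  // reached only if no exception was thrown
        } catch (NumberFormatException ex) {
          // Expected
        }
      }
    }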

+ 45 - 31
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java

@@ -13,25 +13,31 @@
  */
 package org.apache.hadoop.security.authentication.server;
 
+import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
 import org.apache.hadoop.security.authentication.KerberosTestUtils;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
-import junit.framework.TestCase;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.ietf.jgss.GSSContext;
 import org.ietf.jgss.GSSManager;
 import org.ietf.jgss.GSSName;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
 import org.mockito.Mockito;
 import org.ietf.jgss.Oid;
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import java.io.File;
 import java.util.Properties;
 import java.util.concurrent.Callable;
 
-public class TestKerberosAuthenticationHandler extends TestCase {
+public class TestKerberosAuthenticationHandler
+    extends KerberosSecurityTestcase {
 
   protected KerberosAuthenticationHandler handler;
 
@@ -54,9 +60,16 @@ public class TestKerberosAuthenticationHandler extends TestCase {
     return props;
   }
 
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
+  @Before
+  public void setup() throws Exception {
+    // create keytab
+    File keytabFile = new File(KerberosTestUtils.getKeytabFile());
+    String clientPrincipal = KerberosTestUtils.getClientPrincipal();
+    String serverPrincipal = KerberosTestUtils.getServerPrincipal();
+    clientPrincipal = clientPrincipal.substring(0, clientPrincipal.lastIndexOf("@"));
+    serverPrincipal = serverPrincipal.substring(0, serverPrincipal.lastIndexOf("@"));
+    getKdc().createPrincipal(keytabFile, clientPrincipal, serverPrincipal);
+    // handler
     handler = getNewAuthenticationHandler();
     Properties props = getDefaultProperties();
     try {
@@ -67,18 +80,10 @@ public class TestKerberosAuthenticationHandler extends TestCase {
     }
   }
 
-  @Override
-  protected void tearDown() throws Exception {
-    if (handler != null) {
-      handler.destroy();
-      handler = null;
-    }
-    super.tearDown();
-  }
-
+  @Test(timeout=60000)
   public void testNameRules() throws Exception {
     KerberosName kn = new KerberosName(KerberosTestUtils.getServerPrincipal());
-    assertEquals(KerberosTestUtils.getRealm(), kn.getRealm());
+    Assert.assertEquals(KerberosTestUtils.getRealm(), kn.getRealm());
 
     //destroy handler created in setUp()
     handler.destroy();
@@ -93,30 +98,32 @@ public class TestKerberosAuthenticationHandler extends TestCase {
     } catch (Exception ex) {
     }
     kn = new KerberosName("bar@BAR");
-    assertEquals("bar", kn.getShortName());
+    Assert.assertEquals("bar", kn.getShortName());
     kn = new KerberosName("bar@FOO");
     try {
       kn.getShortName();
-      fail();
+      Assert.fail();
     }
     catch (Exception ex) {      
     }
   }
-  
+
+  @Test(timeout=60000)
   public void testInit() throws Exception {
-    assertEquals(KerberosTestUtils.getServerPrincipal(), handler.getPrincipal());
-    assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab());
+    Assert.assertEquals(KerberosTestUtils.getServerPrincipal(), handler.getPrincipal());
+    Assert.assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab());
   }
 
+  @Test(timeout=60000)
   public void testType() throws Exception {
-    assertEquals(getExpectedType(), handler.getType());
+    Assert.assertEquals(getExpectedType(), handler.getType());
   }
 
   public void testRequestWithoutAuthorization() throws Exception {
     HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
     HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
 
-    assertNull(handler.authenticate(request, response));
+    Assert.assertNull(handler.authenticate(request, response));
     Mockito.verify(response).setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
     Mockito.verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED);
   }
@@ -126,11 +133,12 @@ public class TestKerberosAuthenticationHandler extends TestCase {
     HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
 
     Mockito.when(request.getHeader(KerberosAuthenticator.AUTHORIZATION)).thenReturn("invalid");
-    assertNull(handler.authenticate(request, response));
+    Assert.assertNull(handler.authenticate(request, response));
     Mockito.verify(response).setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
     Mockito.verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED);
   }
 
+  @Test(timeout=60000)
   public void testRequestWithIncompleteAuthorization() throws Exception {
     HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
     HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
@@ -139,15 +147,14 @@ public class TestKerberosAuthenticationHandler extends TestCase {
       .thenReturn(KerberosAuthenticator.NEGOTIATE);
     try {
       handler.authenticate(request, response);
-      fail();
+      Assert.fail();
     } catch (AuthenticationException ex) {
       // Expected
     } catch (Exception ex) {
-      fail();
+      Assert.fail();
     }
   }
 
-
   public void testRequestWithAuthorization() throws Exception {
     String token = KerberosTestUtils.doAsClient(new Callable<String>() {
       @Override
@@ -191,9 +198,9 @@ public class TestKerberosAuthenticationHandler extends TestCase {
                                          Mockito.matches(KerberosAuthenticator.NEGOTIATE + " .*"));
       Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
 
-      assertEquals(KerberosTestUtils.getClientPrincipal(), authToken.getName());
-      assertTrue(KerberosTestUtils.getClientPrincipal().startsWith(authToken.getUserName()));
-      assertEquals(getExpectedType(), authToken.getType());
+      Assert.assertEquals(KerberosTestUtils.getClientPrincipal(), authToken.getName());
+      Assert.assertTrue(KerberosTestUtils.getClientPrincipal().startsWith(authToken.getUserName()));
+      Assert.assertEquals(getExpectedType(), authToken.getType());
     } else {
       Mockito.verify(response).setHeader(Mockito.eq(KerberosAuthenticator.WWW_AUTHENTICATE),
                                          Mockito.matches(KerberosAuthenticator.NEGOTIATE + " .*"));
@@ -213,12 +220,19 @@ public class TestKerberosAuthenticationHandler extends TestCase {
 
     try {
       handler.authenticate(request, response);
-      fail();
+      Assert.fail();
     } catch (AuthenticationException ex) {
       // Expected
     } catch (Exception ex) {
-      fail();
+      Assert.fail();
     }
   }
 
+  @After
+  public void tearDown() throws Exception {
+    if (handler != null) {
+      handler.destroy();
+      handler = null;
+    }
+  }
 }
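
With the move to KerberosSecurityTestcase, the test no longer assumes an external KDC: the base class runs an embedded MiniKdc, and setup() provisions the client and server principals into a keytab before the handler is initialized. For orientation, a hedged sketch of driving MiniKdc directly (work directory and principal names are illustrative; the calls are assumed to match the hadoop-minikdc API added by HADOOP-9848):

    import java.io.File;
    import java.util.Properties;
    import org.apache.hadoop.minikdc.MiniKdc;

    public class MiniKdcSketch {
      public static void main(String[] args) throws Exception {
        Properties conf = MiniKdc.createConf();           // default MiniKdc settings
        File workDir = new File("target/minikdc-work");   // illustrative directory
        workDir.mkdirs();
        MiniKdc kdc = new MiniKdc(conf, workDir);
        kdc.start();
        try {
          File keytab = new File(workDir, "test.keytab");
          // Principals are listed without a realm; MiniKdc appends its own realm.
          kdc.createPrincipal(keytab, "client", "HTTP/localhost");
          System.out.println("KDC realm: " + kdc.getRealm());
        } finally {
          kdc.stop();
        }
      }
    }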

+ 18 - 11
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java

@@ -14,33 +14,37 @@
 package org.apache.hadoop.security.authentication.server;
 
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import junit.framework.TestCase;
 import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
+import org.junit.Assert;
+import org.junit.Test;
 import org.mockito.Mockito;
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import java.util.Properties;
 
-public class TestPseudoAuthenticationHandler extends TestCase {
+public class TestPseudoAuthenticationHandler {
 
+  @Test
   public void testInit() throws Exception {
     PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler();
     try {
       Properties props = new Properties();
       props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false");
       handler.init(props);
-      assertEquals(false, handler.getAcceptAnonymous());
+      Assert.assertEquals(false, handler.getAcceptAnonymous());
     } finally {
       handler.destroy();
     }
   }
 
+  @Test
   public void testType() throws Exception {
     PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler();
-    assertEquals(PseudoAuthenticationHandler.TYPE, handler.getType());
+    Assert.assertEquals(PseudoAuthenticationHandler.TYPE, handler.getType());
   }
 
+  @Test
   public void testAnonymousOn() throws Exception {
     PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler();
     try {
@@ -53,12 +57,13 @@ public class TestPseudoAuthenticationHandler extends TestCase {
 
       AuthenticationToken token = handler.authenticate(request, response);
 
-      assertEquals(AuthenticationToken.ANONYMOUS, token);
+      Assert.assertEquals(AuthenticationToken.ANONYMOUS, token);
     } finally {
       handler.destroy();
     }
   }
 
+  @Test
   public void testAnonymousOff() throws Exception {
     PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler();
     try {
@@ -70,11 +75,11 @@ public class TestPseudoAuthenticationHandler extends TestCase {
       HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
 
       handler.authenticate(request, response);
-      fail();
+      Assert.fail();
     } catch (AuthenticationException ex) {
       // Expected
     } catch (Exception ex) {
-      fail();
+      Assert.fail();
     } finally {
       handler.destroy();
     }
@@ -93,19 +98,21 @@ public class TestPseudoAuthenticationHandler extends TestCase {
 
       AuthenticationToken token = handler.authenticate(request, response);
 
-      assertNotNull(token);
-      assertEquals("user", token.getUserName());
-      assertEquals("user", token.getName());
-      assertEquals(PseudoAuthenticationHandler.TYPE, token.getType());
+      Assert.assertNotNull(token);
+      Assert.assertEquals("user", token.getUserName());
+      Assert.assertEquals("user", token.getName());
+      Assert.assertEquals(PseudoAuthenticationHandler.TYPE, token.getType());
     } finally {
       handler.destroy();
     }
   }
 
+  @Test
   public void testUserNameAnonymousOff() throws Exception {
     _testUserName(false);
   }
 
+  @Test
   public void testUserNameAnonymousOn() throws Exception {
     _testUserName(true);
   }

+ 15 - 4
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java

@@ -21,14 +21,19 @@ package org.apache.hadoop.security.authentication.util;
 import java.io.IOException;
 
 import org.apache.hadoop.security.authentication.KerberosTestUtils;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import static org.junit.Assert.*;
+
+import org.junit.Assert;
 
 public class TestKerberosName {
 
   @Before
   public void setUp() throws Exception {
+    System.setProperty("java.security.krb5.realm", KerberosTestUtils.getRealm());
+    System.setProperty("java.security.krb5.kdc", "localhost:88");
+
     String rules =
       "RULE:[1:$1@$0](.*@YAHOO\\.COM)s/@.*//\n" +
       "RULE:[2:$1](johndoe)s/^.*$/guest/\n" +
@@ -44,7 +49,7 @@ public class TestKerberosName {
     KerberosName nm = new KerberosName(from);
     String simple = nm.getShortName();
     System.out.println("to " + simple);
-    assertEquals("short name incorrect", to, simple);
+    Assert.assertEquals("short name incorrect", to, simple);
   }
 
   @Test
@@ -61,7 +66,7 @@ public class TestKerberosName {
     System.out.println("Checking " + name + " to ensure it is bad.");
     try {
       new KerberosName(name);
-      fail("didn't get exception for " + name);
+      Assert.fail("didn't get exception for " + name);
     } catch (IllegalArgumentException iae) {
       // PASS
     }
@@ -72,7 +77,7 @@ public class TestKerberosName {
     KerberosName nm = new KerberosName(from);
     try {
       nm.getShortName();
-      fail("didn't get exception for " + from);
+      Assert.fail("didn't get exception for " + from);
     } catch (IOException ie) {
       // PASS
     }
@@ -85,4 +90,10 @@ public class TestKerberosName {
     checkBadTranslation("foo@ACME.COM");
     checkBadTranslation("root/joe@FOO.COM");
   }
+
+  @After
+  public void clear() {
+    System.clearProperty("java.security.krb5.realm");
+    System.clearProperty("java.security.krb5.kdc");
+  }
 }
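
The new @Before/@After hooks pin java.security.krb5.realm and java.security.krb5.kdc so the rules resolve against a known realm regardless of the host's Kerberos configuration. As a hedged illustration of the mapping the test exercises (the rule strings mirror the ones shown in setUp(); the trailing DEFAULT fallback and the principal are added here for the sketch):

    import org.apache.hadoop.security.authentication.util.KerberosName;

    public class KerberosNameSketch {
      public static void main(String[] args) throws Exception {
        KerberosName.setRules(
            "RULE:[1:$1@$0](.*@YAHOO\\.COM)s/@.*//\n" +
            "RULE:[2:$1](johndoe)s/^.*$/guest/\n" +
            "DEFAULT");
        // Matches the first rule: strip the realm, leaving the short name "omalley".
        System.out.println(new KerberosName("omalley@YAHOO.COM").getShortName());
      }
    }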

+ 6 - 7
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java

@@ -16,11 +16,10 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
-import static org.junit.Assert.*;
+import org.junit.Assert;
 
 import java.io.IOException;
 
-import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.junit.Test;
 
 public class TestKerberosUtil {
@@ -32,23 +31,23 @@ public class TestKerberosUtil {
     String testHost = "FooBar";
 
     // send null hostname
-    assertEquals("When no hostname is sent",
+    Assert.assertEquals("When no hostname is sent",
         service + "/" + localHostname.toLowerCase(),
         KerberosUtil.getServicePrincipal(service, null));
     // send empty hostname
-    assertEquals("When empty hostname is sent",
+    Assert.assertEquals("When empty hostname is sent",
         service + "/" + localHostname.toLowerCase(),
         KerberosUtil.getServicePrincipal(service, ""));
     // send 0.0.0.0 hostname
-    assertEquals("When 0.0.0.0 hostname is sent",
+    Assert.assertEquals("When 0.0.0.0 hostname is sent",
         service + "/" + localHostname.toLowerCase(),
         KerberosUtil.getServicePrincipal(service, "0.0.0.0"));
     // send uppercase hostname
-    assertEquals("When uppercase hostname is sent",
+    Assert.assertEquals("When uppercase hostname is sent",
         service + "/" + testHost.toLowerCase(),
         KerberosUtil.getServicePrincipal(service, testHost));
     // send lowercase hostname
-    assertEquals("When lowercase hostname is sent",
+    Assert.assertEquals("When lowercase hostname is sent",
         service + "/" + testHost.toLowerCase(),
         KerberosUtil.getServicePrincipal(service, testHost.toLowerCase()));
   }

+ 21 - 15
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java

@@ -13,68 +13,75 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
-import junit.framework.TestCase;
+import org.junit.Assert;
+import org.junit.Test;
 
-public class TestSigner extends TestCase {
+public class TestSigner {
 
+  @Test
   public void testNoSecret() throws Exception {
     try {
       new Signer(null);
-      fail();
+      Assert.fail();
     }
     catch (IllegalArgumentException ex) {
     }
   }
 
+  @Test
   public void testNullAndEmptyString() throws Exception {
     Signer signer = new Signer("secret".getBytes());
     try {
       signer.sign(null);
-      fail();
+      Assert.fail();
     } catch (IllegalArgumentException ex) {
       // Expected
     } catch (Throwable ex) {
-      fail();
+      Assert.fail();
     }
     try {
       signer.sign("");
-      fail();
+      Assert.fail();
     } catch (IllegalArgumentException ex) {
       // Expected
     } catch (Throwable ex) {
-      fail();
+      Assert.fail();
     }
   }
 
+  @Test
   public void testSignature() throws Exception {
     Signer signer = new Signer("secret".getBytes());
     String s1 = signer.sign("ok");
     String s2 = signer.sign("ok");
     String s3 = signer.sign("wrong");
-    assertEquals(s1, s2);
-    assertNotSame(s1, s3);
+    Assert.assertEquals(s1, s2);
+    Assert.assertNotSame(s1, s3);
   }
 
+  @Test
   public void testVerify() throws Exception {
     Signer signer = new Signer("secret".getBytes());
     String t = "test";
     String s = signer.sign(t);
     String e = signer.verifyAndExtract(s);
-    assertEquals(t, e);
+    Assert.assertEquals(t, e);
   }
 
+  @Test
   public void testInvalidSignedText() throws Exception {
     Signer signer = new Signer("secret".getBytes());
     try {
       signer.verifyAndExtract("test");
-      fail();
+      Assert.fail();
     } catch (SignerException ex) {
       // Expected
     } catch (Throwable ex) {
-      fail();
+      Assert.fail();
     }
   }
 
+  @Test
   public void testTampering() throws Exception {
     Signer signer = new Signer("secret".getBytes());
     String t = "test";
@@ -82,12 +89,11 @@ public class TestSigner extends TestCase {
     s += "x";
     try {
       signer.verifyAndExtract(s);
-      fail();
+      Assert.fail();
     } catch (SignerException ex) {
       // Expected
     } catch (Throwable ex) {
-      fail();
+      Assert.fail();
     }
   }
-
 }
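
Functionally Signer is unchanged: sign() appends a signature to the payload and verifyAndExtract() strips it back off, throwing SignerException if the text was altered. A short hedged usage sketch (the payload string is illustrative):

    import org.apache.hadoop.security.authentication.util.Signer;

    public class SignerSketch {
      public static void main(String[] args) throws Exception {
        Signer signer = new Signer("secret".getBytes());
        String signed = signer.sign("u=alice&e=1234567890");  // payload plus appended signature
        String original = signer.verifyAndExtract(signed);    // "u=alice&e=1234567890"
        System.out.println(original);
        // Altering 'signed' makes verifyAndExtract() throw SignerException instead.
      }
    }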

+ 316 - 32
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -1,5 +1,245 @@
 Hadoop Change Log
 
+Release 2.4.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+    HADOOP-8545. Filesystem Implementation for OpenStack Swift
+    (Dmitry Mezhensky, David Dobbins, Stevel via stevel)
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HADOOP-9784. Add a builder for HttpServer. (Junping Du via llu)
+
+    HADOOP-9871. Fix intermittent findbugs warnings in DefaultMetricsSystem.
+    (Junping Du via llu)
+
+    HADOOP-9319. Update bundled LZ4 source to r99. (Binglin Chang via llu)
+
+    HADOOP-9432 Add support for markdown .md files in site documentation (stevel)
+
+    HADOOP-9241. DU refresh interval is not configurable (harsh)
+
+    HADOOP-9417.  Support for symlink resolution in LocalFileSystem /
+    RawLocalFileSystem.  (Andrew Wang via Colin Patrick McCabe)
+
+    HADOOP-9703.  org.apache.hadoop.ipc.Client leaks threads on stop.
+    (Tsuyoshi OZAWA via Colin Patrick McCabe)
+
+    HADOOP-9618.  Add thread which detects GC pauses.
+    (Todd Lipcon via Colin Patrick McCabe)
+
+    HADOOP-9848. Create a MiniKDC for use with security testing. 
+    (ywskycn via tucu)
+
+    HADOOP-9860. Remove class HackedKeytab and HackedKeytabEncoder from 
+    hadoop-minikdc once jira DIRSERVER-1882 solved. (ywskycn via tucu)
+
+    HADOOP-9866. convert hadoop-auth testcases requiring kerberos to 
+    use minikdc. (ywskycn via tucu)
+
+    HADOOP-9487 Deprecation warnings in Configuration should go to their
+    own log or otherwise be suppressible (Chu Tong via stevel)
+
+    HADOOP-9889. Refresh the Krb5 configuration when creating a new kdc in
+    Hadoop-MiniKDC (Wei Yan via Sandy Ryza)
+
+    HADOOP-9915.  o.a.h.fs.Stat support on Mac OS X  (Binglin Chang via Colin
+    Patrick McCabe)
+
+    HADOOP-9998.  Provide methods to clear only part of the DNSToSwitchMapping.
+    (Junping Du via Colin Patrick McCabe)
+
+    HADOOP-9063. enhance unit-test coverage of class
+    org.apache.hadoop.fs.FileUtil (Ivan A. Veselovsky via jlowe)
+
+    HADOOP-9254. Cover packages org.apache.hadoop.util.bloom,
+    org.apache.hadoop.util.hash (Vadim Bondarev via jlowe)
+
+    HADOOP-9225. Cover package org.apache.hadoop.compress.Snappy (Vadim
+    Bondarev, Andrey Klochkov and Nathan Roberts via jlowe)
+
+    HADOOP-9199. Cover package org.apache.hadoop.io with unit tests (Andrey
+    Klochkov via jeagles)
+
+    HADOOP-9470. eliminate duplicate FQN tests in different Hadoop modules
+    (Ivan A. Veselovsky via daryn)
+
+    HADOOP-9494. Excluded auto-generated and examples code from clover reports
+    (Andrey Klochkov via jeagles)
+
+    HADOOP-9897. Add method to get path start position without drive specifier in
+    o.a.h.fs.Path. (Binglin Chang via cnauroth)
+
+    HADOOP-9078. enhance unit-test coverage of class
+    org.apache.hadoop.fs.FileContext (Ivan A. Veselovsky via jeagles)
+
+    HDFS-5276. FileSystem.Statistics should use thread-local counters to avoid
+    multi-threaded performance issues on read/write.  (Colin Patrick McCabe)
+
+    HADOOP-9291. enhance unit-test coverage of package o.a.h.metrics2 (Ivan A.
+    Veselovsky via jeagles)
+
+    HADOOP-10064. Upgrade to maven antrun plugin version 1.7 (Arpit Agarwal via
+    jeagles)
+
+    HADOOP-9594. Update apache commons math dependency (Timothy St. Clair via
+    stevel)
+
+    HADOOP-10095. In CodecPool, synchronize pool and codecList separately in
+    order to reduce lock contention.  (Nicolas Liochon via szetszwo)
+
+    HADOOP-10067. Missing POM dependency on jsr305 (Robert Rati via stevel)
+
+    HADOOP-10103. update commons-lang to 2.6 (Akira AJISAKA via stevel)
+
+    HADOOP-10111. Allow DU to be initialized with an initial value (Kihwal Lee
+    via jeagles)
+
+    HADOOP-10126. LightWeightGSet log message is confusing. (Vinay via suresh)
+
+    HADOOP-10127. Add ipc.client.connect.retry.interval to control the frequency
+    of connection retries (Karthik Kambatla via Sandy Ryza)
+
+    HADOOP-10102. Update commons IO from 2.1 to 2.4 (Akira Ajisaka via stevel)
+
+    HADOOP-10168. fix javadoc of ReflectionUtils#copy. (Thejas Nair via suresh)
+
+    HADOOP-10164. Allow UGI to login with a known Subject (bobby)
+
+    HADOOP-10169. Remove the unnecessary synchronized in JvmMetrics class.
+    (Liang Xie via jing9) 
+
+    HADOOP-10198. DomainSocket: add support for socketpair.
+    (Colin Patrick McCabe via wang)
+
+    HADOOP-10208. Remove duplicate initialization in StringUtils.getStringCollection.
+    (Benoy Antony via jing9)
+
+    HADOOP-9420. Add percentile or max metric for rpcQueueTime, processing time.
+    (Liang Xie via wang)
+
+    HADOOP-9652. Allow RawLocalFs#getFileLinkStatus to fill in the link owner
+    and mode if requested. (Andrew Wang via Colin Patrick McCabe)
+
+  OPTIMIZATIONS
+
+    HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)
+
+    HADOOP-10047. Add a direct-buffer based apis for compression. (Gopal V
+    via acmurthy)
+
+    HADOOP-10172. Cache SASL server factories (daryn)
+
+    HADOOP-10173. Remove UGI from DIGEST-MD5 SASL server creation (daryn via
+    kihwal)
+
+    HADOOP-10228. FsPermission#fromShort() should cache FsAction.values().
+    (Haohui Mai via cnauroth)
+
+    HADOOP-10143 replace WritableFactories's hashmap with ConcurrentHashMap
+    (Liang Xie via stack)
+
+  BUG FIXES
+
+    HADOOP-9964. Fix deadlocks in TestHttpServer by synchronize
+    ReflectionUtils.printThreadInfo. (Junping Du via llu)
+
+    HADOOP-9582. Non-existent file to "hadoop fs -conf" doesn't throw error
+    (Ashwin Shankar via jlowe)
+
+    HADOOP-9817. FileSystem#globStatus and FileContext#globStatus need to work
+    with symlinks. (Colin Patrick McCabe via Andrew Wang)
+
+    HADOOP-9875.  TestDoAsEffectiveUser can fail on JDK 7.  (Aaron T. Myers via
+    Colin Patrick McCabe)
+
+    HADOOP-9865.  FileContext#globStatus has a regression with respect to
+    relative path.  (Chuan Lin via Colin Patrick McCabe)
+
+    HADOOP-9909. org.apache.hadoop.fs.Stat should permit other LANG.
+    (Shinichi Yamashita via Andrew Wang)
+
+    HADOOP-9908. Fix NPE when versioninfo properties file is missing (todd)
+
+    HADOOP-9929. Insufficient permissions for a path reported as File Not
+    Found.  (Contributed by Colin Patrick McCabe)
+
+    HADOOP-9791. Add a test case covering long paths for new FileUtil access
+    check methods (ivanmi)
+
+    HADOOP-9981. globStatus should minimize its listStatus and getFileStatus
+    calls.  (Contributed by Colin Patrick McCabe)
+
+    HADOOP-10006. Compilation failure in trunk for
+    o.a.h.fs.swift.util.JSONUtil (Junping Du via stevel)
+
+    HADOOP-9016. HarFsInputStream.skip(long) must never return negative value.
+    (Ivan A. Veselovsky via jeagles)
+
+    HADOOP-10088. copy-nativedistlibs.sh needs to quote snappy lib dir.
+    (Raja Aluri via cnauroth)
+
+    HADOOP-10093. hadoop-env.cmd sets HADOOP_CLIENT_OPTS with a max heap size
+    that is too small. (Shanyu Zhao via cnauroth)
+
+    HADOOP-10094. NPE in GenericOptionsParser#preProcessForWindows().
+    (Enis Soztutar via cnauroth)
+
+    HADOOP-10100. MiniKDC shouldn't use apacheds-all artifact. (rkanter via tucu)
+
+    HADOOP-10107. Server.getNumOpenConnections may throw NPE. (Kihwal Lee via
+    jing9)
+
+    HADOOP-10135 writes to swift fs over partition size leave temp files and
+    empty output file (David Dobbins via stevel)
+
+    HADOOP-10129. Distcp may succeed when it fails (daryn)
+
+    HADOOP-10058. TestMetricsSystemImpl#testInitFirstVerifyStopInvokedImmediately
+    fails on trunk (Chen He via jeagles)
+
+    HADOOP-10162. Fix symlink-related test failures in
+    TestFileContextResolveAfs and TestStat in branch-2 (Mit Desai via Colin
+    Patrick McCabe)
+
+    HADOOP-8753. LocalDirAllocator throws "ArithmeticException: / by zero" when
+    there is no available space on configured local dir. (Benoy Antony via hitesh)
+
+    HADOOP-10106. Incorrect thread name in RPC log messages. (Ming Ma via jing9)
+
+    HADOOP-9611 mvn-rpmbuild against google-guice > 3.0 yields missing cglib
+    dependency (Timothy St. Clair via stevel)
+
+    HADOOP-10171. TestRPC fails intermittently on jkd7 (Mit Desai via jeagles)
+
+    HADOOP-10147  HDFS-5678 Upgrade to commons-logging 1.1.3 to avoid potential
+    deadlock in MiniDFSCluster (stevel)
+
+    HADOOP-10207. TestUserGroupInformation#testLogin is flaky (jxiang via
+    cmccabe)
+
+    HADOOP-10214. Fix multithreaded correctness warnings in ActiveStandbyElector
+    (Liang Xie via kasha)
+
+    HADOOP-10223. MiniKdc#main() should close the FileReader it creates. 
+    (Ted Yu via tucu)
+
+    HADOOP-10236. Fix typo in o.a.h.ipc.Client#checkResponse. (Akira Ajisaka
+    via suresh)
+
+    HADOOP-10146. Workaround JDK7 Process fd close bug (daryn)
+
+    HADOOP-10125. no need to process RPC request if the client connection
+    has been dropped (Ming Ma via brandonli)
+
+    HADOOP-10235. Hadoop tarball has 2 versions of stax-api JARs. (tucu)
+
+    HADOOP-10252. HttpServer can't start if hostname is not specified. (Jimmy
+    Xiang via atm)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -20,6 +260,9 @@ Release 2.3.0 - UNRELEASED
     HADOOP-10132. RPC#stopProxy() should log the class of proxy when IllegalArgumentException 
     is encountered (Ted yu via umamahesh)
 
+    HADOOP-10248. Property name should be included in the exception where property value 
+    is null (Akira AJISAKA via umamahesh)
+
   OPTIMIZATIONS
 
     HADOOP-10142. Avoid groups lookup for unprivileged users such as "dr.who"
@@ -44,9 +287,6 @@ Release 2.3.0 - UNRELEASED
     HADOOP-10055. FileSystemShell.apt.vm doc has typo "numRepicas".
     (Akira Ajisaka via cnauroth)
 
-    HADOOP-10052. Temporarily disable client-side symlink resolution
-    (branch-2.2 only change). (wang)
-
     HADOOP-10072. TestNfsExports#testMultiMatchers fails due to non-deterministic
     timing around cache expiry check. (cnauroth)
 
@@ -82,6 +322,9 @@ Release 2.3.0 - UNRELEASED
     HADOOP-10175. Har files system authority should preserve userinfo.
     (Chuan Liu via cnauroth)
 
+    HADOOP-10090. Jobtracker metrics not updated properly after execution
+    of a mapreduce job. (ivanmi)
+
     HADOOP-10193. hadoop-auth's PseudoAuthenticationHandler can consume getInputStream. 
     (gchanan via tucu)
 
@@ -105,14 +348,10 @@ Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
 
-    HADOOP-10020. Disable symlinks temporarily (branch-2.1-beta only change)
-    (sanjay via suresh)
+    HADOOP-10020. Disable symlinks temporarily (cmccabe)
 
   NEW FEATURES
 
-    HDFS-4817.  Make HDFS advisory caching configurable on a per-file basis.
-    (Contributed by Colin Patrick McCabe)
-
   IMPROVEMENTS
 
     HADOOP-9948. Add a config value to CLITestHelper to skip tests on Windows.
@@ -121,8 +360,8 @@ Release 2.2.0 - 2013-10-13
     HADOOP-9976. Different versions of avro and avro-maven-plugin (Karthik
     Kambatla via Sandy Ryza)
 
-    HADOOP-9758. Provide configuration option for FileSystem/FileContext
-    symlink resolution (Andrew Wang via Colin Patrick McCabe)
+    HADOOP-9758.  Provide configuration option for FileSystem/FileContext
+    symlink resolution.  (Andrew Wang via Colin Patrick McCabe)
 
     HADOOP-8315. Support SASL-authenticated ZooKeeper in ActiveStandbyElector
     (todd)
@@ -158,30 +397,29 @@ Release 2.1.1-beta - 2013-09-23
     HADOOP-9910. proxy server start and stop documentation wrong
     (Andre Kelpe via harsh)
 
+    HADOOP-9446. Support Kerberos SPNEGO for IBM JDK. (Yu Gao via llu)
+
     HADOOP-9787. ShutdownHelper util to shutdown threads and threadpools.
     (Karthik Kambatla via Sandy Ryza)
 
     HADOOP-9803. Add a generic type parameter to RetryInvocationHandler.
     (szetszwo)
 
-    HADOOP-9821. ClientId should have getMsb/getLsb methods.
+    HADOOP-9821. ClientId should have getMsb/getLsb methods. 
     (Tsuyoshi OZAWA via jing9)
 
     HADOOP-9435.  Support building the JNI code against the IBM JVM.
     (Tian Hong Wang via Colin Patrick McCabe)
 
-    HADOOP-9355.  Abstract symlink tests to use either FileContext or
-    FileSystem.  (Andrew Wang via Colin Patrick McCabe)
-
     HADOOP-9833 move slf4j to version 1.7.5 (Kousuke Saruta via stevel)
 
     HADOOP-9672. Upgrade Avro dependency to 1.7.4. (sandy via kihwal)
 
+    HADOOP-9789. Support server advertised kerberos principals (daryn)
+
     HADOOP-8814. Replace string equals "" by String#isEmpty().
     (Brandon Li via suresh)
 
-    HADOOP-9789. Support server advertised kerberos principals (daryn)
-
     HADOOP-9802. Support Snappy codec on Windows. (cnauroth)
 
     HADOOP-9879. Move the version info of zookeeper dependencies to
@@ -201,9 +439,6 @@ Release 2.1.1-beta - 2013-09-23
     HADOOP-9962. in order to avoid dependency divergence within Hadoop itself 
     lets enable DependencyConvergence. (rvs via tucu)
 
-    HADOOP-9487 Deprecation warnings in Configuration should go to their
-    own log or otherwise be suppressible (Chu Tong via stevel)
-
     HADOOP-9669. Reduce the number of byte array creations and copies in
     XDR data manipulation. (Haohui Mai via brandonli)
 
@@ -222,6 +457,9 @@ Release 2.1.1-beta - 2013-09-23
     HADOOP-9806 PortmapInterface should check if the procedure is out-of-range
     (brandonli)
 
+    HADOOP-9527. Add symlink support to LocalFileSystem on Windows.
+    (Arpit Agarwal via cnauroth)
+
     HADOOP-9315. Port HADOOP-9249 hadoop-maven-plugins Clover fix to branch-2 to
     fix build failures. (Dennis Y via cnauroth)
 
@@ -238,9 +476,6 @@ Release 2.1.1-beta - 2013-09-23
     HADOOP-9857. Tests block and sometimes timeout on Windows due to invalid
     entropy source. (cnauroth)
 
-    HADOOP-9527. Add symlink support to LocalFileSystem on Windows.
-    (Arpit Agarwal)
-
     HADOOP-9381. Document dfs cp -f option. (Keegan Witt, suresh via suresh)
 
     HADOOP-9868. Server must not advertise kerberos realm. (daryn via kihwal)
@@ -248,6 +483,9 @@ Release 2.1.1-beta - 2013-09-23
     HADOOP-9880. SASL changes from HADOOP-9421 breaks Secure HA NN. (daryn via
     jing9)
 
+    HADOOP-9887. globStatus does not correctly handle paths starting with a drive
+    spec on Windows. (Chuan Liu via cnauroth)
+
     HADOOP-9899. Remove the debug message, added by HADOOP-8855, from
     KerberosAuthenticator.  (szetszwo)
 
@@ -437,6 +675,9 @@ Release 2.1.0-beta - 2013-08-22
     HADOOP-9370.  Write FSWrapper class to wrap FileSystem and FileContext for
     better test coverage.  (Andrew Wang via Colin Patrick McCabe)
 
+    HADOOP-9355.  Abstract symlink tests to use either FileContext or
+    FileSystem.  (Andrew Wang via Colin Patrick McCabe)
+
     HADOOP-9673.  NetworkTopology: when a node can't be added, print out its
     location for diagnostic purposes.  (Colin Patrick McCabe)
 
@@ -474,6 +715,9 @@ Release 2.1.0-beta - 2013-08-22
     HADOOP-9786. RetryInvocationHandler#isRpcInvocation should support 
     ProtocolTranslator. (suresh and jing9)
 
+    HADOOP-9847. TestGlobPath symlink tests fail to cleanup properly.
+    (cmccabe via wang)
+
   OPTIMIZATIONS
 
     HADOOP-9150. Avoid unnecessary DNS resolution attempts for logical URIs
@@ -618,8 +862,8 @@ Release 2.1.0-beta - 2013-08-22
     HADOOP-9624. TestFSMainOperationsLocalFileSystem failed when the Hadoop test
     root path has "X" in its name. (Xi Fang via cnauroth)
 
-    HADOOP-9439.  JniBasedUnixGroupsMapping: fix some crash bugs. (Colin
-    Patrick McCabe)
+    HADOOP-9439.  JniBasedUnixGroupsMapping: fix some crash bugs (Colin Patrick
+    McCabe)
 
     HADOOP-9656. Gridmix unit tests fail on Windows and Linux. (Chuan Liu via
     cnauroth)
@@ -1353,6 +1597,12 @@ Release 2.0.2-alpha - 2012-09-07
     hostname in token instead of IP to allow server IP change. 
     (Daryn Sharp via suresh)
 
+    HADOOP-8367 Improve documentation of declaringClassProtocolName in 
+    rpc headers. (Sanjay Radia)
+
+    HADOOP-8624. ProtobufRpcEngine should log all RPCs if TRACE logging is
+    enabled (todd)
+
   BUG FIXES
 
     HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
@@ -1720,12 +1970,6 @@ Release 2.0.0-alpha - 05-23-2012
     HADOOP-7994. Remove getProtocolVersion and getProtocolSignature from the 
     client side translator and server side implementation. (jitendra)
 
-    HADOOP-8367 Improve documentation of declaringClassProtocolName in 
-    rpc headers. (Sanjay Radia)
-
-    HADOOP-8624. ProtobufRpcEngine should log all RPCs if TRACE logging is
-    enabled (todd)
-
   OPTIMIZATIONS
 
   BUG FIXES
@@ -1947,7 +2191,21 @@ Release 2.0.0-alpha - 05-23-2012
     HADOOP-7606. Upgrade Jackson to version 1.7.1 to match the version required
     by Jersey (Alejandro Abdelnur via atm)
 
-Release 0.23.9 - UNRELEASED
+Release 0.23.11 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+    
+  IMPROVEMENTS
+    
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HADOOP-10129. Distcp may succeed when it fails (daryn)
+
+Release 0.23.10 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
@@ -1955,12 +2213,38 @@ Release 0.23.9 - UNRELEASED
 
   IMPROVEMENTS
 
+    HADOOP-9686. Easy access to final parameters in Configuration (Jason Lowe
+    via jeagles)
+
+    HADOOP-8704. add request logging to jetty/httpserver (jeagles)
+
   OPTIMIZATIONS
 
+    HADOOP-9956. RPC listener inefficiently assigns connections to readers (daryn)
+
+    HADOOP-9955. RPC idle connection closing is extremely inefficient (daryn)
+
   BUG FIXES
 
-Release 0.23.8 - 2013-06-05
+Release 0.23.9 - 2013-07-08
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
 
+  BUG FIXES
+
+    HADOOP-9581. hadoop --config non-existent directory should result in error
+    (Ashwin Shankar via jlowe)
+
+    HADOOP-9582. Non-existent file to "hadoop fs -conf" doesn't throw error
+    (Ashwin Shankar via jlowe)
+
+Release 0.23.8 - 2013-06-05
 
   INCOMPATIBLE CHANGES
 

+ 11 - 5
hadoop-common-project/hadoop-common/LICENSE.txt

@@ -252,24 +252,26 @@ in src/main/native/src/org/apache/hadoop/util:
  *   BSD-style license that can be found in the LICENSE file.
  */
 
- For src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c:
+For src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c,
+lz4_encoder.h,lz4hc.h,lz4hc.c,lz4hc_encoder.h},
 
 /*
    LZ4 - Fast LZ compression algorithm
-   Copyright (C) 2011, Yann Collet.
-   BSD License
+   Header File
+   Copyright (C) 2011-2013, Yann Collet.
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:
-  
+
        * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
        * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.
-  
+
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -281,4 +283,8 @@ in src/main/native/src/org/apache/hadoop/util:
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
+   - LZ4 source repository : http://code.google.com/p/lz4/
 */

+ 17 - 1
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -348,4 +348,20 @@
        <Method name="waitForServiceToStop" />
        <Bug code="JLM" />
      </Match>
- </FindBugsFilter>
+
+  <!--
+  OpenStack Swift FS module -closes streams in a different method
+  from where they are opened.
+  -->
+    <Match>
+      <Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
+      <Method name="uploadFileAttempt"/>
+      <Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
+    </Match>
+    <Match>
+      <Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
+      <Method name="uploadFilePartAttempt"/>
+      <Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
+    </Match>
+
+</FindBugsFilter>

+ 19 - 6
hadoop-common-project/hadoop-common/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>2.3.0-SNAPSHOT</version>
+    <version>2.4.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-common</artifactId>
-  <version>2.3.0-SNAPSHOT</version>
+  <version>2.4.0-SNAPSHOT</version>
   <description>Apache Hadoop Common</description>
   <name>Apache Hadoop Common</name>
   <packaging>jar</packaging>
@@ -53,7 +53,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
-      <artifactId>commons-math</artifactId>
+      <artifactId>commons-math3</artifactId>
       <scope>compile</scope>
     </dependency>
     <dependency>
@@ -81,6 +81,11 @@
       <artifactId>commons-net</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>commons-collections</groupId>
+      <artifactId>commons-collections</artifactId>
+      <scope>compile</scope>
+    </dependency>
     <dependency>
       <groupId>javax.servlet</groupId>
       <artifactId>servlet-api</artifactId>
@@ -213,6 +218,11 @@
       <groupId>com.jcraft</groupId>
       <artifactId>jsch</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.google.code.findbugs</groupId>
+      <artifactId>jsr305</artifactId>
+      <scope>compile</scope>
+    </dependency>
 
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
@@ -451,7 +461,12 @@
             <exclude>src/test/empty-file</exclude>
             <exclude>src/test/all-tests</exclude>
             <exclude>src/test/resources/kdc/ldif/users.ldif</exclude>
+            <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.h</exclude>
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c</exclude>
+            <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4_encoder.h</exclude>
+            <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h</exclude>
+            <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c</exclude>
+            <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h</exclude>
             <exclude>src/test/java/org/apache/hadoop/fs/test-untar.tgz</exclude>
             <exclude>src/test/resources/test.har/_SUCCESS</exclude>
             <exclude>src/test/resources/test.har/_index</exclude>
@@ -703,9 +718,7 @@
             <groupId>org.apache.maven.plugins</groupId>
             <artifactId>maven-surefire-plugin</artifactId>
             <configuration>
-              <forkMode>perthread</forkMode>
-              <threadCount>${testsThreadCount}</threadCount>
-              <parallel>classes</parallel>
+              <forkCount>${testsThreadCount}</forkCount>
               <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -DminiClusterDedicatedDirs=true</argLine>
             </configuration>
           </plugin>

+ 1 - 0
hadoop-common-project/hadoop-common/src/CMakeLists.txt

@@ -170,6 +170,7 @@ add_dual_library(hadoop
     ${D}/io/compress/lz4/Lz4Compressor.c
     ${D}/io/compress/lz4/Lz4Decompressor.c
     ${D}/io/compress/lz4/lz4.c
+    ${D}/io/compress/lz4/lz4hc.c
     ${SNAPPY_SOURCE_FILES}
     ${D}/io/compress/zlib/ZlibCompressor.c
     ${D}/io/compress/zlib/ZlibDecompressor.c

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.cmd

@@ -58,7 +58,7 @@ set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OP
 set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
 
 @rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-set HADOOP_CLIENT_OPTS=-Xmx128m %HADOOP_CLIENT_OPTS%
+set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS%
 @rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
 
 @rem On secure datanodes, user to run the datanode as after dropping privileges

+ 36 - 0
hadoop-common-project/hadoop-common/src/main/conf/log4j.properties

@@ -229,3 +229,39 @@ log4j.appender.RMSUMMARY.MaxFileSize=256MB
 log4j.appender.RMSUMMARY.MaxBackupIndex=20
 log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
 log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+
+# HS audit log configs
+#mapreduce.hs.audit.logger=INFO,HSAUDIT
+#log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
+#log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
+#log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
+#log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
+#log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd
+
+# Http Server Request Logs
+#log4j.logger.http.requests.namenode=INFO,namenoderequestlog
+#log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
+#log4j.appender.namenoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.datanode=INFO,datanoderequestlog
+#log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
+#log4j.appender.datanoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
+#log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
+#log4j.appender.resourcemanagerrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
+#log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
+#log4j.appender.jobhistoryrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
+#log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
+#log4j.appender.nodemanagerrequestlog.RetainDays=3

+ 13 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -957,7 +957,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
         "Property name must not be null");
     Preconditions.checkArgument(
         value != null,
-        "Property value must not be null");
+        "The value of property " + name + " must not be null");
     DeprecationContext deprecations = deprecationContext.get();
     if (deprecations.getDeprecatedKeyMap().isEmpty()) {
       getProps();
@@ -2072,6 +2072,15 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     }
   }
 
+  /**
+   * Get the set of parameters marked final.
+   *
+   * @return final parameter set.
+   */
+  public Set<String> getFinalParameters() {
+    return new HashSet<String>(finalParameters);
+  }
+
   protected synchronized Properties getProps() {
     if (properties == null) {
       properties = new Properties();
@@ -2131,7 +2140,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   private Document parse(DocumentBuilder builder, URL url)
       throws IOException, SAXException {
     if (!quietmode) {
-      LOG.info("parsing URL " + url);
+      LOG.debug("parsing URL " + url);
     }
     if (url == null) {
       return null;
@@ -2142,7 +2151,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   private Document parse(DocumentBuilder builder, InputStream is,
       String systemId) throws IOException, SAXException {
     if (!quietmode) {
-      LOG.info("parsing input stream " + is);
+      LOG.debug("parsing input stream " + is);
     }
     if (is == null) {
       return null;
@@ -2215,7 +2224,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
           .getAbsoluteFile();
         if (file.exists()) {
           if (!quiet) {
-            LOG.info("parsing File " + file);
+            LOG.debug("parsing File " + file);
           }
           doc = parse(builder, new BufferedInputStream(
               new FileInputStream(file)), ((Path)resource).toString());
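
Beyond demoting the resource-parsing messages to debug, the patch names the offending property when a null value is rejected and exposes the set of keys loaded as final. A hedged sketch of both behaviours:

    import org.apache.hadoop.conf.Configuration;

    public class ConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Keys declared with <final>true</final> in the loaded resources.
        for (String key : conf.getFinalParameters()) {
          System.out.println("final: " + key);
        }
        try {
          conf.set("fs.defaultFS", null);
        } catch (IllegalArgumentException e) {
          // "The value of property fs.defaultFS must not be null"
          System.out.println(e.getMessage());
        }
      }
    }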

+ 118 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java

@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+/**
+ * A RemoteIterator that fetches elements in batches.
+ */
+public abstract class BatchedRemoteIterator<K, E> implements RemoteIterator<E> {
+  public interface BatchedEntries<E> {
+    public E get(int i);
+    public int size();
+    public boolean hasMore();
+  }
+
+  public static class BatchedListEntries<E> implements BatchedEntries<E> {
+    private final List<E> entries;
+    private final boolean hasMore;
+
+    public BatchedListEntries(List<E> entries, boolean hasMore) {
+      this.entries = entries;
+      this.hasMore = hasMore;
+    }
+
+    public E get(int i) {
+      return entries.get(i);
+    }
+
+    public int size() {
+      return entries.size();
+    }
+
+    public boolean hasMore() {
+      return hasMore;
+    }
+  }
+
+  private K prevKey;
+  private BatchedEntries<E> entries;
+  private int idx;
+
+  public BatchedRemoteIterator(K prevKey) {
+    this.prevKey = prevKey;
+    this.entries = null;
+    this.idx = -1;
+  }
+
+  /**
+   * Perform the actual remote request.
+   * 
+   * @param prevKey The key to send.
+   * @return A list of replies.
+   */
+  public abstract BatchedEntries<E> makeRequest(K prevKey) throws IOException;
+
+  private void makeRequest() throws IOException {
+    idx = 0;
+    entries = null;
+    entries = makeRequest(prevKey);
+    if (entries.size() == 0) {
+      entries = null;
+    }
+  }
+
+  private void makeRequestIfNeeded() throws IOException {
+    if (idx == -1) {
+      makeRequest();
+    } else if ((entries != null) && (idx >= entries.size())) {
+      if (!entries.hasMore()) {
+        // Last time, we got fewer entries than requested.
+        // So we should be at the end.
+        entries = null;
+      } else {
+        makeRequest();
+      }
+    }
+  }
+
+  @Override
+  public boolean hasNext() throws IOException {
+    makeRequestIfNeeded();
+    return (entries != null);
+  }
+
+  /**
+   * Return the next list key associated with an element.
+   */
+  public abstract K elementToPrevKey(E element);
+
+  @Override
+  public E next() throws IOException {
+    makeRequestIfNeeded();
+    if (entries == null) {
+      throw new NoSuchElementException();
+    }
+    E entry = entries.get(idx++);
+    prevKey = elementToPrevKey(entry);
+    return entry;
+  }
+}
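
BatchedRemoteIterator drives paged remote listings: a subclass implements makeRequest() to fetch one batch starting after prevKey, and elementToPrevKey() to tell the iterator where the next batch should resume. A minimal sketch (the element type and the fetchPage() helper are hypothetical):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.fs.BatchedRemoteIterator;

    public abstract class PagedStringIterator
        extends BatchedRemoteIterator<String, String> {

      private static final int BATCH_SIZE = 100;

      public PagedStringIterator() {
        super("");                                  // start before the first key
      }

      /** Hypothetical remote call returning up to BATCH_SIZE entries after prevKey. */
      protected abstract List<String> fetchPage(String prevKey) throws IOException;

      @Override
      public BatchedEntries<String> makeRequest(String prevKey) throws IOException {
        List<String> page = fetchPage(prevKey);
        // A short page means the listing is exhausted; hasMore=false ends iteration.
        return new BatchedListEntries<String>(page, page.size() == BATCH_SIZE);
      }

      @Override
      public String elementToPrevKey(String element) {
        return element;                             // next request resumes after this element
      }
    }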

+ 66 - 35
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java

@@ -31,17 +31,33 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceStability.Stable
 public class BlockLocation {
   private String[] hosts; // Datanode hostnames
+  private String[] cachedHosts; // Datanode hostnames with a cached replica
   private String[] names; // Datanode IP:xferPort for accessing the block
   private String[] topologyPaths; // Full path name in network topology
   private long offset;  // Offset of the block in the file
   private long length;
   private boolean corrupt;
 
+  private static final String[] EMPTY_STR_ARRAY = new String[0];
+
   /**
    * Default Constructor
    */
   public BlockLocation() {
-    this(new String[0], new String[0],  0L, 0L);
+    this(EMPTY_STR_ARRAY, EMPTY_STR_ARRAY, 0L, 0L);
+  }
+
+  /**
+   * Copy constructor
+   */
+  public BlockLocation(BlockLocation that) {
+    this.hosts = that.hosts;
+    this.cachedHosts = that.cachedHosts;
+    this.names = that.names;
+    this.topologyPaths = that.topologyPaths;
+    this.offset = that.offset;
+    this.length = that.length;
+    this.corrupt = that.corrupt;
   }
 
   /**
@@ -57,20 +73,7 @@ public class BlockLocation {
    */
   public BlockLocation(String[] names, String[] hosts, long offset, 
                        long length, boolean corrupt) {
-    if (names == null) {
-      this.names = new String[0];
-    } else {
-      this.names = names;
-    }
-    if (hosts == null) {
-      this.hosts = new String[0];
-    } else {
-      this.hosts = hosts;
-    }
-    this.offset = offset;
-    this.length = length;
-    this.topologyPaths = new String[0];
-    this.corrupt = corrupt;
+    this(names, hosts, null, offset, length, corrupt);
   }
 
   /**
@@ -87,34 +90,55 @@ public class BlockLocation {
    */
   public BlockLocation(String[] names, String[] hosts, String[] topologyPaths,
                        long offset, long length, boolean corrupt) {
-    this(names, hosts, offset, length, corrupt);
+    this(names, hosts, null, topologyPaths, offset, length, corrupt);
+  }
+
+  public BlockLocation(String[] names, String[] hosts, String[] cachedHosts,
+      String[] topologyPaths, long offset, long length, boolean corrupt) {
+    if (names == null) {
+      this.names = EMPTY_STR_ARRAY;
+    } else {
+      this.names = names;
+    }
+    if (hosts == null) {
+      this.hosts = EMPTY_STR_ARRAY;
+    } else {
+      this.hosts = hosts;
+    }
+    if (cachedHosts == null) {
+      this.cachedHosts = EMPTY_STR_ARRAY;
+    } else {
+      this.cachedHosts = cachedHosts;
+    }
     if (topologyPaths == null) {
-      this.topologyPaths = new String[0];
+      this.topologyPaths = EMPTY_STR_ARRAY;
     } else {
       this.topologyPaths = topologyPaths;
     }
+    this.offset = offset;
+    this.length = length;
+    this.corrupt = corrupt;
   }
 
   /**
    * Get the list of hosts (hostname) hosting this block
    */
   public String[] getHosts() throws IOException {
-    if (hosts == null || hosts.length == 0) {
-      return new String[0];
-    } else {
-      return hosts;
-    }
+    return hosts;
+  }
+
+  /**
+   * Get the list of hosts (hostname) hosting a cached replica of the block
+   */
+  public String[] getCachedHosts() {
+    return cachedHosts;
   }
 
   /**
    * Get the list of names (IP:xferPort) hosting this block
    */
   public String[] getNames() throws IOException {
-    if (names == null || names.length == 0) {
-      return new String[0];
-    } else {
-      return names;
-    }
+    return names;
   }
 
   /**
@@ -122,11 +146,7 @@ public class BlockLocation {
    * The last component of the path is the "name" (IP:xferPort).
    */
   public String[] getTopologyPaths() throws IOException {
-    if (topologyPaths == null || topologyPaths.length == 0) {
-      return new String[0];
-    } else {
-      return topologyPaths;
-    }
+    return topologyPaths;
   }
   
   /**
@@ -176,18 +196,29 @@ public class BlockLocation {
    */
   public void setHosts(String[] hosts) throws IOException {
     if (hosts == null) {
-      this.hosts = new String[0];
+      this.hosts = EMPTY_STR_ARRAY;
     } else {
       this.hosts = hosts;
     }
   }
 
+  /**
+   * Set the hosts hosting a cached replica of this block
+   */
+  public void setCachedHosts(String[] cachedHosts) {
+    if (cachedHosts == null) {
+      this.cachedHosts = EMPTY_STR_ARRAY;
+    } else {
+      this.cachedHosts = cachedHosts;
+    }
+  }
+
   /**
    * Set the names (host:port) hosting this block
    */
   public void setNames(String[] names) throws IOException {
     if (names == null) {
-      this.names = new String[0];
+      this.names = EMPTY_STR_ARRAY;
     } else {
       this.names = names;
     }
@@ -198,7 +229,7 @@ public class BlockLocation {
    */
   public void setTopologyPaths(String[] topologyPaths) throws IOException {
     if (topologyPaths == null) {
-      this.topologyPaths = new String[0];
+      this.topologyPaths = EMPTY_STR_ARRAY;
     } else {
       this.topologyPaths = topologyPaths;
     }

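For reference, a quick illustration (not part of this patch) of the new cached-replica accessors; the host names and sizes below are made up:

String[] names         = { "10.0.0.1:50010", "10.0.0.2:50010" };
String[] hosts         = { "dn1.example.com", "dn2.example.com" };
String[] cachedHosts   = { "dn1.example.com" };   // replica cached in memory on dn1
String[] topologyPaths = { "/rack1/dn1.example.com", "/rack2/dn2.example.com" };

BlockLocation loc = new BlockLocation(names, hosts, cachedHosts,
    topologyPaths, 0L, 128L * 1024 * 1024, false);

// never null: a missing argument falls back to EMPTY_STR_ARRAY
for (String h : loc.getCachedHosts()) {
  System.out.println("cached replica on " + h);
}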
+ 113 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java

@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.ByteBufferPool;
+
+import com.google.common.base.Preconditions;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class ByteBufferUtil {
+
+  /**
+   * Determine if a stream can do a byte buffer read via read(ByteBuffer buf)
+   */
+  private static boolean streamHasByteBufferRead(InputStream stream) {
+    if (!(stream instanceof ByteBufferReadable)) {
+      return false;
+    }
+    if (!(stream instanceof FSDataInputStream)) {
+      return true;
+    }
+    return ((FSDataInputStream)stream).getWrappedStream() 
+        instanceof ByteBufferReadable;
+  }
+
+  /**
+   * Perform a fallback read.
+   */
+  public static ByteBuffer fallbackRead(
+      InputStream stream, ByteBufferPool bufferPool, int maxLength)
+          throws IOException {
+    if (bufferPool == null) {
+      throw new UnsupportedOperationException("zero-copy reads " +
+          "were not available, and you did not provide a fallback " +
+          "ByteBufferPool.");
+    }
+    boolean useDirect = streamHasByteBufferRead(stream);
+    ByteBuffer buffer = bufferPool.getBuffer(useDirect, maxLength);
+    if (buffer == null) {
+      throw new UnsupportedOperationException("zero-copy reads " +
+          "were not available, and the ByteBufferPool did not provide " +
+          "us with " + (useDirect ? "a direct" : "an indirect") +
+          "buffer.");
+    }
+    Preconditions.checkState(buffer.capacity() > 0);
+    Preconditions.checkState(buffer.isDirect() == useDirect);
+    maxLength = Math.min(maxLength, buffer.capacity());
+    boolean success = false;
+    try {
+      if (useDirect) {
+        buffer.clear();
+        buffer.limit(maxLength);
+        ByteBufferReadable readable = (ByteBufferReadable)stream;
+        int totalRead = 0;
+        while (true) {
+          if (totalRead >= maxLength) {
+            success = true;
+            break;
+          }
+          int nRead = readable.read(buffer);
+          if (nRead < 0) {
+            if (totalRead > 0) {
+              success = true;
+            }
+            break;
+          }
+          totalRead += nRead;
+        }
+        buffer.flip();
+      } else {
+        buffer.clear();
+        int nRead = stream.read(buffer.array(),
+            buffer.arrayOffset(), maxLength);
+        if (nRead >= 0) {
+          buffer.limit(nRead);
+          success = true;
+        }
+      }
+    } finally {
+      if (!success) {
+        // If we got an error while reading, or if we are at EOF, we 
+        // don't need the buffer any more.  We can give it back to the
+        // bufferPool.
+        bufferPool.putBuffer(buffer);
+        buffer = null;
+      }
+    }
+    return buffer;
+  }
+}

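A minimal sketch (not part of this patch) of driving fallbackRead directly. The trivial pool below allocates on demand and recycles nothing; a real caller would typically plug in a pooling ByteBufferPool implementation:

import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;

import org.apache.hadoop.fs.ByteBufferUtil;
import org.apache.hadoop.io.ByteBufferPool;

public class FallbackReadExample {
  /** Simplest possible pool: allocate on demand, discard on return. */
  static class SimplePool implements ByteBufferPool {
    @Override
    public ByteBuffer getBuffer(boolean direct, int length) {
      return direct ? ByteBuffer.allocateDirect(length)
                    : ByteBuffer.allocate(length);
    }
    @Override
    public void putBuffer(ByteBuffer buffer) {
      // nothing is recycled in this sketch
    }
  }

  /** Reads up to maxLength bytes from any InputStream via the fallback path. */
  public static ByteBuffer readSome(InputStream in, int maxLength)
      throws IOException {
    // a plain stream takes the byte-array branch of fallbackRead;
    // a ByteBufferReadable stream takes the direct-buffer branch
    return ByteBufferUtil.fallbackRead(in, new SimplePool(), maxLength);
  }
}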
+ 30 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -21,7 +21,6 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.http.lib.StaticUserWebFilter;
-import org.apache.hadoop.security.authorize.Service;
 
 /** 
  * This class contains constants for configuration keys used
@@ -65,6 +64,13 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   /** Default value for IPC_SERVER_RPC_READ_THREADS_KEY */
   public static final int     IPC_SERVER_RPC_READ_THREADS_DEFAULT = 1;
   
+  /** Number of pending connections that may be queued per socket reader */
+  public static final String IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_KEY =
+      "ipc.server.read.connection-queue.size";
+  /** Default value for IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE */
+  public static final int IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_DEFAULT =
+      100;
+      
   public static final String IPC_MAXIMUM_DATA_LENGTH =
       "ipc.maximum.data.length";
   
@@ -96,7 +102,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final int IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT =
       256 * 1024;
 
-  /** Internal buffer size for Snappy compressor/decompressors */
+  /** Internal buffer size for Lz4 compressor/decompressors */
   public static final String IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY =
       "io.compression.codec.lz4.buffersize";
 
@@ -104,6 +110,14 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final int IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT =
       256 * 1024;
 
+  /** Use lz4hc (slow but with high compression ratio) for lz4 compression */
+  public static final String IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY =
+      "io.compression.codec.lz4.use.lz4hc";
+
+  /** Default value for IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY */
+  public static final boolean IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT =
+      false;
+
   /**
    * Service Authorization
    */
@@ -219,4 +233,18 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String  IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY = "ipc.client.fallback-to-simple-auth-allowed";
   public static final boolean IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT = false;
 
+  /** How often the server scans for idle connections */
+  public static final String IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY =
+      "ipc.client.connection.idle-scan-interval.ms";
+  /** Default value for IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY */
+  public static final int IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_DEFAULT =
+      10000;
+
+  public static final String HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS =
+    "hadoop.user.group.metrics.percentiles.intervals";
+
+  public static final String RPC_METRICS_QUANTILE_ENABLE =
+      "rpc.metrics.quantile.enable";
+  public static final String  RPC_METRICS_PERCENTILES_INTERVALS_KEY =
+      "rpc.metrics.percentiles.intervals";
 }

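Usage of the new keys follows the usual Configuration pattern; an illustrative fragment (values are examples only):

Configuration conf = new Configuration();

// opt into the high-compression lz4 variant (off by default)
conf.setBoolean(
    CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY, true);

int readQueueSize = conf.getInt(
    CommonConfigurationKeys.IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_KEY,
    CommonConfigurationKeys.IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_DEFAULT);

int idleScanIntervalMs = conf.getInt(
    CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY,
    CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_DEFAULT);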
+ 10 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -55,11 +55,16 @@ public class CommonConfigurationKeysPublic {
   /** Default value for FS_DF_INTERVAL_KEY */
   public static final long    FS_DF_INTERVAL_DEFAULT = 60000;
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String  FS_DU_INTERVAL_KEY = "fs.du.interval";
+  /** Default value for FS_DU_INTERVAL_KEY */
+  public static final long    FS_DU_INTERVAL_DEFAULT = 600000;
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY =
     "fs.client.resolve.remote.symlinks";
   /** Default value for FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY */
   public static final boolean FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_DEFAULT = true;
 
+
   //Defaults are not specified for following keys
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY =
@@ -187,6 +192,11 @@ public class CommonConfigurationKeysPublic {
   /** Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_KEY */
   public static final int     IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT = 10;
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String  IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY =
+      "ipc.client.connect.retry.interval";
+  /** Default value for IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY */
+  public static final int     IPC_CLIENT_CONNECT_RETRY_INTERVAL_DEFAULT = 1000;
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY =
     "ipc.client.connect.max.retries.on.timeouts";
   /** Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY */

+ 37 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Shell;
 
 import java.io.BufferedReader;
@@ -46,17 +47,32 @@ public class DU extends Shell {
    * @throws IOException if we fail to refresh the disk usage
    */
   public DU(File path, long interval) throws IOException {
+    this(path, interval, -1L);
+  }
+  
+  /**
+   * Keeps track of disk usage.
+   * @param path the path to check disk usage in
+   * @param interval refresh the disk usage at this interval
+   * @param initialUsed use this value until next refresh
+   * @throws IOException if we fail to refresh the disk usage
+   */
+  public DU(File path, long interval, long initialUsed) throws IOException { 
     super(0);
-    
+
     //we set the Shell interval to 0 so it will always run our command
     //and use this one to set the thread sleep interval
     this.refreshInterval = interval;
     this.dirPath = path.getCanonicalPath();
-    
-    //populate the used variable
-    run();
+
+    //populate the used variable if the initial value is not specified.
+    if (initialUsed < 0) {
+      run();
+    } else {
+      this.used.set(initialUsed);
+    }
   }
-  
+
   /**
    * Keeps track of disk usage.
    * @param path the path to check disk usage in
@@ -64,10 +80,24 @@ public class DU extends Shell {
    * @throws IOException if we fail to refresh the disk usage
    */
   public DU(File path, Configuration conf) throws IOException {
-    this(path, 600000L);
-    //10 minutes default refresh interval
+    this(path, conf, -1L);
   }
 
+  /**
+   * Keeps track of disk usage.
+   * @param path the path to check disk usage in
+   * @param conf configuration object
+   * @param initialUsed use it until the next refresh.
+   * @throws IOException if we fail to refresh the disk usage
+   */
+  public DU(File path, Configuration conf, long initialUsed)
+      throws IOException {
+    this(path, conf.getLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY,
+                CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT), initialUsed);
+  }
+
   /**
    * This thread refreshes the "used" variable.
    * 

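An illustrative fragment (not part of this patch) of the new initialUsed parameter; the path and value are made up, and start()/getUsed() are this class's pre-existing accessors, shown only for context:

Configuration conf = new Configuration();
long lastKnownUsage = 42L * 1024 * 1024;          // e.g. restored from a cache file

// the constructor no longer blocks on an initial 'du' run when a value is supplied
DU du = new DU(new File("/data/1/dfs"), conf, lastKnownUsage);
du.start();                                        // background refresh thread
long used = du.getUsed();                          // initialUsed until the first refresh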
+ 11 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java

@@ -113,7 +113,14 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
 
   @Override
   public FileStatus getFileLinkStatus(final Path f) throws IOException {
-    return getFileStatus(f);
+    FileStatus status = fsImpl.getFileLinkStatus(f);
+    // FileSystem#getFileLinkStatus qualifies the link target
+    // AbstractFileSystem needs to return it plain since it's qualified
+    // in FileContext, so re-get and set the plain target
+    if (status.isSymlink()) {
+      status.setSymlink(fsImpl.getLinkTarget(f));
+    }
+    return status;
   }
 
   @Override
@@ -199,22 +206,18 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
 
   @Override
   public boolean supportsSymlinks() {
-    return false;
+    return fsImpl.supportsSymlinks();
   }  
   
   @Override
   public void createSymlink(Path target, Path link, boolean createParent) 
       throws IOException { 
-    throw new IOException("File system does not support symlinks");
+    fsImpl.createSymlink(target, link, createParent);
   } 
   
   @Override
   public Path getLinkTarget(final Path f) throws IOException {
-    /* We should never get here. Any file system that threw an 
-     * UnresolvedLinkException, causing this function to be called,
-     * should override getLinkTarget. 
-     */
-    throw new AssertionError();
+    return fsImpl.getLinkTarget(f);
   }
 
   @Override //AbstractFileSystem

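With symlinks enabled (they remain off by default, see the FileSystem change later in this patch), the wrapper now simply delegates; an illustrative FileContext fragment with made-up paths:

FileSystem.enableSymlinks();                        // test-only switch
FileContext fc = FileContext.getFileContext();

Path target = new Path("/user/alice/data.txt");
Path link   = new Path("/user/alice/latest");
fc.createSymlink(target, link, false);

FileStatus linkStatus = fc.getFileLinkStatus(link); // status of the link itself
Path linkTarget = fc.getLinkTarget(link);           // the stored link target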
+ 56 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java

@@ -1,4 +1,5 @@
 /**
+ * 
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -19,17 +20,29 @@ package org.apache.hadoop.fs;
 
 import java.io.*;
 import java.nio.ByteBuffer;
+import java.util.EnumSet;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.ByteBufferPool;
+import org.apache.hadoop.fs.ByteBufferUtil;
+import org.apache.hadoop.util.IdentityHashStore;
 
 /** Utility that wraps a {@link FSInputStream} in a {@link DataInputStream}
  * and buffers input through a {@link BufferedInputStream}. */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class FSDataInputStream extends DataInputStream
-    implements Seekable, PositionedReadable, Closeable,
-    ByteBufferReadable, HasFileDescriptor, CanSetDropBehind, CanSetReadahead {
+    implements Seekable, PositionedReadable, Closeable, 
+      ByteBufferReadable, HasFileDescriptor, CanSetDropBehind, CanSetReadahead,
+      HasEnhancedByteBufferAccess {
+  /**
+   * Map ByteBuffers that we have handed out to readers to ByteBufferPool 
+   * objects
+   */
+  private final IdentityHashStore<ByteBuffer, ByteBufferPool>
+    extendedReadBuffers
+      = new IdentityHashStore<ByteBuffer, ByteBufferPool>(0);
 
   public FSDataInputStream(InputStream in)
     throws IOException {
@@ -167,4 +180,45 @@ public class FSDataInputStream extends DataInputStream
           "support setting the drop-behind caching setting.");
     }
   }
+
+  @Override
+  public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
+      EnumSet<ReadOption> opts) 
+          throws IOException, UnsupportedOperationException {
+    try {
+      return ((HasEnhancedByteBufferAccess)in).read(bufferPool,
+          maxLength, opts);
+    }
+    catch (ClassCastException e) {
+      ByteBuffer buffer = ByteBufferUtil.
+          fallbackRead(this, bufferPool, maxLength);
+      if (buffer != null) {
+        extendedReadBuffers.put(buffer, bufferPool);
+      }
+      return buffer;
+    }
+  }
+
+  private static final EnumSet<ReadOption> EMPTY_READ_OPTIONS_SET =
+      EnumSet.noneOf(ReadOption.class);
+
+  final public ByteBuffer read(ByteBufferPool bufferPool, int maxLength)
+          throws IOException, UnsupportedOperationException {
+    return read(bufferPool, maxLength, EMPTY_READ_OPTIONS_SET);
+  }
+  
+  @Override
+  public void releaseBuffer(ByteBuffer buffer) {
+    try {
+      ((HasEnhancedByteBufferAccess)in).releaseBuffer(buffer);
+    }
+    catch (ClassCastException e) {
+      ByteBufferPool bufferPool = extendedReadBuffers.remove(buffer);
+      if (bufferPool == null) {
+        throw new IllegalArgumentException("tried to release a buffer " +
+            "that was not created by this stream.");
+      }
+      bufferPool.putBuffer(buffer);
+    }
+  }
 }

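An illustrative fragment (not part of this patch) of the read/releaseBuffer pairing added above; 'fs' and 'pool' stand for any FileSystem and ByteBufferPool, and the path is made up:

FSDataInputStream in = fs.open(new Path("/user/alice/data.bin"));
try {
  // falls back to ByteBufferUtil.fallbackRead if the wrapped stream does not
  // implement HasEnhancedByteBufferAccess
  ByteBuffer buf = in.read(pool, 1024 * 1024);
  if (buf != null) {
    // ... consume buf ...
    in.releaseBuffer(buf);        // always pair a successful read with a release
  }
} finally {
  in.close();
}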
+ 2 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java

@@ -26,9 +26,8 @@ import java.io.OutputStream;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
-/** Utility that wraps a {@link OutputStream} in a {@link DataOutputStream},
- * buffers output through a {@link BufferedOutputStream} and creates a checksum
- * file. */
+/** Utility that wraps a {@link OutputStream} in a {@link DataOutputStream}.
+ */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class FSDataOutputStream extends DataOutputStream

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java

@@ -18,9 +18,11 @@
 package org.apache.hadoop.fs;
 
 import java.io.*;
+import java.nio.ByteBuffer;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ZeroCopyUnavailableException;
 
 /****************************************************************
  * FSInputStream is a generic old InputStream with a little bit

+ 1 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSLinkResolver.java

@@ -76,7 +76,6 @@ public abstract class FSLinkResolver<T> {
    * @return Generic type determined by the implementation of next.
    * @throws IOException
    */
-  @SuppressWarnings("deprecation")
   public T resolve(final FileContext fc, final Path path) throws IOException {
     int count = 0;
     T in = null;
@@ -96,7 +95,7 @@ public abstract class FSLinkResolver<T> {
               + " and symlink resolution is disabled ("
               + CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY + ").", e);
         }
-        if (!FileSystem.isSymlinksEnabled()) {
+        if (!FileSystem.areSymlinksEnabled()) {
           throw new IOException("Symlink resolution is disabled in"
               + " this version of Hadoop.");
         }

+ 4 - 151
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -268,7 +268,7 @@ public final class FileContext {
    * Hence this method is not called makeAbsolute() and 
    * has been deliberately declared private.
    */
-  private Path fixRelativePart(Path p) {
+  Path fixRelativePart(Path p) {
     if (p.isUriPathAbsolute()) {
       return p;
     } else {
@@ -1345,7 +1345,7 @@ public final class FileContext {
       FileAlreadyExistsException, FileNotFoundException,
       ParentNotDirectoryException, UnsupportedFileSystemException, 
       IOException { 
-    if (!FileSystem.isSymlinksEnabled()) {
+    if (!FileSystem.areSymlinksEnabled()) {
       throw new UnsupportedOperationException("Symlinks not supported");
     }
     final Path nonRelLink = fixRelativePart(link);
@@ -1919,7 +1919,7 @@ public final class FileContext {
     public FileStatus[] globStatus(Path pathPattern)
         throws AccessControlException, UnsupportedFileSystemException,
         IOException {
-      return globStatus(pathPattern, DEFAULT_FILTER);
+      return new Globber(FileContext.this, pathPattern, DEFAULT_FILTER).glob();
     }
     
     /**
@@ -1948,154 +1948,7 @@ public final class FileContext {
     public FileStatus[] globStatus(final Path pathPattern,
         final PathFilter filter) throws AccessControlException,
         UnsupportedFileSystemException, IOException {
-      URI uri = getFSofPath(fixRelativePart(pathPattern)).getUri();
-
-      String filename = pathPattern.toUri().getPath();
-
-      List<String> filePatterns = GlobExpander.expand(filename);
-      if (filePatterns.size() == 1) {
-        Path absPathPattern = fixRelativePart(pathPattern);
-        return globStatusInternal(uri, new Path(absPathPattern.toUri()
-            .getPath()), filter);
-      } else {
-        List<FileStatus> results = new ArrayList<FileStatus>();
-        for (String iFilePattern : filePatterns) {
-          Path iAbsFilePattern = fixRelativePart(new Path(iFilePattern));
-          FileStatus[] files = globStatusInternal(uri, iAbsFilePattern, filter);
-          for (FileStatus file : files) {
-            results.add(file);
-          }
-        }
-        return results.toArray(new FileStatus[results.size()]);
-      }
-    }
-
-    /**
-     * 
-     * @param uri for all the inPathPattern
-     * @param inPathPattern - without the scheme & authority (take from uri)
-     * @param filter
-     *
-     * @return an array of FileStatus objects
-     *
-     * @throws AccessControlException If access is denied
-     * @throws IOException If an I/O error occurred
-     */
-    private FileStatus[] globStatusInternal(final URI uri,
-        final Path inPathPattern, final PathFilter filter)
-        throws AccessControlException, IOException
-      {
-      Path[] parents = new Path[1];
-      int level = 0;
-      
-      assert(inPathPattern.toUri().getScheme() == null &&
-          inPathPattern.toUri().getAuthority() == null && 
-          inPathPattern.isUriPathAbsolute());
-
-      
-      String filename = inPathPattern.toUri().getPath();
-      
-      // path has only zero component
-      if ("".equals(filename) || Path.SEPARATOR.equals(filename)) {
-        Path p = inPathPattern.makeQualified(uri, null);
-        return getFileStatus(new Path[]{p});
-      }
-
-      // path has at least one component
-      String[] components = filename.split(Path.SEPARATOR);
-      
-      // Path is absolute, first component is "/" hence first component
-      // is the uri root
-      parents[0] = new Path(new Path(uri), new Path("/"));
-      level = 1;
-
-      // glob the paths that match the parent path, ie. [0, components.length-1]
-      boolean[] hasGlob = new boolean[]{false};
-      Path[] relParentPaths = 
-        globPathsLevel(parents, components, level, hasGlob);
-      FileStatus[] results;
-      
-      if (relParentPaths == null || relParentPaths.length == 0) {
-        results = null;
-      } else {
-        // fix the pathes to be abs
-        Path[] parentPaths = new Path [relParentPaths.length]; 
-        for(int i=0; i<relParentPaths.length; i++) {
-          parentPaths[i] = relParentPaths[i].makeQualified(uri, null);
-        }
-        
-        // Now work on the last component of the path
-        GlobFilter fp = 
-                    new GlobFilter(components[components.length - 1], filter);
-        if (fp.hasPattern()) { // last component has a pattern
-          // list parent directories and then glob the results
-          try {
-            results = listStatus(parentPaths, fp);
-          } catch (FileNotFoundException e) {
-            results = null;
-          }
-          hasGlob[0] = true;
-        } else { // last component does not have a pattern
-          // get all the path names
-          ArrayList<Path> filteredPaths = 
-                                      new ArrayList<Path>(parentPaths.length);
-          for (int i = 0; i < parentPaths.length; i++) {
-            parentPaths[i] = new Path(parentPaths[i],
-              components[components.length - 1]);
-            if (fp.accept(parentPaths[i])) {
-              filteredPaths.add(parentPaths[i]);
-            }
-          }
-          // get all their statuses
-          results = getFileStatus(
-              filteredPaths.toArray(new Path[filteredPaths.size()]));
-        }
-      }
-
-      // Decide if the pathPattern contains a glob or not
-      if (results == null) {
-        if (hasGlob[0]) {
-          results = new FileStatus[0];
-        }
-      } else {
-        if (results.length == 0) {
-          if (!hasGlob[0]) {
-            results = null;
-          }
-        } else {
-          Arrays.sort(results);
-        }
-      }
-      return results;
-    }
-
-    /*
-     * For a path of N components, return a list of paths that match the
-     * components [<code>level</code>, <code>N-1</code>].
-     */
-    private Path[] globPathsLevel(Path[] parents, String[] filePattern,
-        int level, boolean[] hasGlob) throws AccessControlException,
-        FileNotFoundException, IOException {
-      if (level == filePattern.length - 1) {
-        return parents;
-      }
-      if (parents == null || parents.length == 0) {
-        return null;
-      }
-      GlobFilter fp = new GlobFilter(filePattern[level]);
-      if (fp.hasPattern()) {
-        try {
-          parents = FileUtil.stat2Paths(listStatus(parents, fp));
-        } catch (FileNotFoundException e) {
-          parents = null;
-        }
-        hasGlob[0] = true;
-      } else {
-        for (int i = 0; i < parents.length; i++) {
-          parents[i] = new Path(parents[i], filePattern[level]);
-        }
-      }
-      return globPathsLevel(parents, filePattern, level + 1, hasGlob);
+      return new Globber(FileContext.this, pathPattern, filter).glob();
     }
 
     /**

+ 286 - 158
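Callers are unchanged; both overloads now route through the shared Globber. An illustrative fragment with a made-up pattern:

FileContext fc = FileContext.getFileContext();
FileStatus[] matches =
    fc.util().globStatus(new Path("/logs/2014-01-*/part-*"));
if (matches == null || matches.length == 0) {
  // null is only returned for a non-wildcard pattern that matched nothing
  System.err.println("no matches");
} else {
  for (FileStatus st : matches) {
    System.out.println(st.getPath());
  }
}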
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
 import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.lang.ref.WeakReference;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
@@ -31,6 +32,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.IdentityHashMap;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.NoSuchElementException;
@@ -1619,7 +1621,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * @throws IOException
    */
   public FileStatus[] globStatus(Path pathPattern) throws IOException {
-    return globStatus(pathPattern, DEFAULT_FILTER);
+    return new Globber(this, pathPattern, DEFAULT_FILTER).glob();
   }
   
   /**
@@ -1638,126 +1640,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    */
   public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
       throws IOException {
-    String filename = pathPattern.toUri().getPath();
-    List<FileStatus> allMatches = null;
-    
-    List<String> filePatterns = GlobExpander.expand(filename);
-    for (String filePattern : filePatterns) {
-      Path path = new Path(filePattern.isEmpty() ? Path.CUR_DIR : filePattern);
-      List<FileStatus> matches = globStatusInternal(path, filter);
-      if (matches != null) {
-        if (allMatches == null) {
-          allMatches = matches;
-        } else {
-          allMatches.addAll(matches);
-        }
-      }
-    }
-    
-    FileStatus[] results = null;
-    if (allMatches != null) {
-      results = allMatches.toArray(new FileStatus[allMatches.size()]);
-    } else if (filePatterns.size() > 1) {
-      // no matches with multiple expansions is a non-matching glob 
-      results = new FileStatus[0];
-    }
-    return results;
-  }
-
-  // sort gripes because FileStatus Comparable isn't parameterized...
-  @SuppressWarnings("unchecked") 
-  private List<FileStatus> globStatusInternal(Path pathPattern,
-      PathFilter filter) throws IOException {
-    boolean patternHasGlob = false;       // pathPattern has any globs
-    List<FileStatus> matches = new ArrayList<FileStatus>();
-
-    // determine starting point
-    int level = 0;
-    String baseDir = Path.CUR_DIR;
-    if (pathPattern.isAbsolute()) {
-      level = 1; // need to skip empty item at beginning of split list
-      baseDir = Path.SEPARATOR;
-    }
-    
-    // parse components and determine if it's a glob
-    String[] components = null;
-    GlobFilter[] filters = null;
-    String filename = pathPattern.toUri().getPath();
-    if (!filename.isEmpty() && !Path.SEPARATOR.equals(filename)) {
-      components = filename.split(Path.SEPARATOR);
-      filters = new GlobFilter[components.length];
-      for (int i=level; i < components.length; i++) {
-        filters[i] = new GlobFilter(components[i]);
-        patternHasGlob |= filters[i].hasPattern();
-      }
-      if (!patternHasGlob) {
-        baseDir = unquotePathComponent(filename);
-        components = null; // short through to filter check
-      }
-    }
-    
-    // seed the parent directory path, return if it doesn't exist
-    try {
-      matches.add(getFileStatus(new Path(baseDir)));
-    } catch (FileNotFoundException e) {
-      return patternHasGlob ? matches : null;
-    }
-    
-    // skip if there are no components other than the basedir
-    if (components != null) {
-      // iterate through each path component
-      for (int i=level; (i < components.length) && !matches.isEmpty(); i++) {
-        List<FileStatus> children = new ArrayList<FileStatus>();
-        for (FileStatus match : matches) {
-          // don't look for children in a file matched by a glob
-          if (!match.isDirectory()) {
-            continue;
-          }
-          try {
-            if (filters[i].hasPattern()) {
-              // get all children matching the filter
-              FileStatus[] statuses = listStatus(match.getPath(), filters[i]);
-              children.addAll(Arrays.asList(statuses));
-            } else {
-              // the component does not have a pattern
-              String component = unquotePathComponent(components[i]);
-              Path child = new Path(match.getPath(), component);
-              children.add(getFileStatus(child));
-            }
-          } catch (FileNotFoundException e) {
-            // don't care
-          }
-        }
-        matches = children;
-      }
-    }
-    // remove anything that didn't match the filter
-    if (!matches.isEmpty()) {
-      Iterator<FileStatus> iter = matches.iterator();
-      while (iter.hasNext()) {
-        if (!filter.accept(iter.next().getPath())) {
-          iter.remove();
-        }
-      }
-    }
-    // no final paths, if there were any globs return empty list
-    if (matches.isEmpty()) {
-      return patternHasGlob ? matches : null;
-    }
-    Collections.sort(matches);
-    return matches;
-  }
-
-  /**
-   * The glob filter builds a regexp per path component.  If the component
-   * does not contain a shell metachar, then it falls back to appending the
-   * raw string to the list of built up paths.  This raw path needs to have
-   * the quoting removed.  Ie. convert all occurances of "\X" to "X"
-   * @param name of the path component
-   * @return the unquoted path component
-   */
-  private String unquotePathComponent(String name) {
-    return name.replaceAll("\\\\(.)", "$1");
+    return new Globber(this, pathPattern, filter).glob();
   }
   
   /**
@@ -2621,28 +2504,149 @@ public abstract class FileSystem extends Configured implements Closeable {
     }
   }
   
+  /**
+   * Tracks statistics about how many reads, writes, and so forth have been
+   * done in a FileSystem.
+   * 
+   * Since there is only one of these objects per FileSystem, there will 
+   * typically be many threads writing to this object.  Almost every operation
+   * on an open file will involve a write to this object.  In contrast, reading
+   * statistics is done infrequently by most programs, and not at all by others.
+   * Hence, this is optimized for writes.
+   * 
+   * Each thread writes to its own thread-local area of memory.  This removes 
+   * contention and allows us to scale up to many, many threads.  To read
+   * statistics, the reader thread totals up the contents of all of the 
+   * thread-local data areas.
+   */
   public static final class Statistics {
+    /**
+     * Statistics data.
+     * 
+     * There is only a single writer to thread-local StatisticsData objects.
+     * Hence, volatile is adequate here-- we do not need AtomicLong or similar
+     * to prevent lost updates.
+     * The Java specification guarantees that updates to volatile longs will
+     * be perceived as atomic with respect to other threads, which is all we
+     * need.
+     */
+    private static class StatisticsData {
+      volatile long bytesRead;
+      volatile long bytesWritten;
+      volatile int readOps;
+      volatile int largeReadOps;
+      volatile int writeOps;
+      /**
+       * Stores a weak reference to the thread owning this StatisticsData.
+       * This allows us to remove StatisticsData objects that pertain to
+       * threads that no longer exist.
+       */
+      final WeakReference<Thread> owner;
+
+      StatisticsData(WeakReference<Thread> owner) {
+        this.owner = owner;
+      }
+
+      /**
+       * Add another StatisticsData object to this one.
+       */
+      void add(StatisticsData other) {
+        this.bytesRead += other.bytesRead;
+        this.bytesWritten += other.bytesWritten;
+        this.readOps += other.readOps;
+        this.largeReadOps += other.largeReadOps;
+        this.writeOps += other.writeOps;
+      }
+
+      /**
+       * Negate the values of all statistics.
+       */
+      void negate() {
+        this.bytesRead = -this.bytesRead;
+        this.bytesWritten = -this.bytesWritten;
+        this.readOps = -this.readOps;
+        this.largeReadOps = -this.largeReadOps;
+        this.writeOps = -this.writeOps;
+      }
+
+      @Override
+      public String toString() {
+        return bytesRead + " bytes read, " + bytesWritten + " bytes written, "
+            + readOps + " read ops, " + largeReadOps + " large read ops, "
+            + writeOps + " write ops";
+      }
+    }
+
+    private interface StatisticsAggregator<T> {
+      void accept(StatisticsData data);
+      T aggregate();
+    }
+
     private final String scheme;
-    private AtomicLong bytesRead = new AtomicLong();
-    private AtomicLong bytesWritten = new AtomicLong();
-    private AtomicInteger readOps = new AtomicInteger();
-    private AtomicInteger largeReadOps = new AtomicInteger();
-    private AtomicInteger writeOps = new AtomicInteger();
+
+    /**
+     * rootData is data that doesn't belong to any thread, but will be added
+     * to the totals.  This is useful for making copies of Statistics objects,
+     * and for storing data that pertains to threads that have been garbage
+     * collected.  Protected by the Statistics lock.
+     */
+    private final StatisticsData rootData;
+
+    /**
+     * Thread-local data.
+     */
+    private final ThreadLocal<StatisticsData> threadData;
     
+    /**
+     * List of all thread-local data areas.  Protected by the Statistics lock.
+     */
+    private LinkedList<StatisticsData> allData;
+
     public Statistics(String scheme) {
       this.scheme = scheme;
+      this.rootData = new StatisticsData(null);
+      this.threadData = new ThreadLocal<StatisticsData>();
+      this.allData = null;
     }
 
     /**
      * Copy constructor.
      * 
-     * @param st
-     *          The input Statistics object which is cloned.
+     * @param other    The input Statistics object which is cloned.
+     */
+    public Statistics(Statistics other) {
+      this.scheme = other.scheme;
+      this.rootData = new StatisticsData(null);
+      other.visitAll(new StatisticsAggregator<Void>() {
+        @Override
+        public void accept(StatisticsData data) {
+          rootData.add(data);
+        }
+
+        public Void aggregate() {
+          return null;
+        }
+      });
+      this.threadData = new ThreadLocal<StatisticsData>();
+    }
+
+    /**
+     * Get or create the thread-local data associated with the current thread.
      */
-    public Statistics(Statistics st) {
-      this.scheme = st.scheme;
-      this.bytesRead = new AtomicLong(st.bytesRead.longValue());
-      this.bytesWritten = new AtomicLong(st.bytesWritten.longValue());
+    private StatisticsData getThreadData() {
+      StatisticsData data = threadData.get();
+      if (data == null) {
+        data = new StatisticsData(
+            new WeakReference<Thread>(Thread.currentThread()));
+        threadData.set(data);
+        synchronized(this) {
+          if (allData == null) {
+            allData = new LinkedList<StatisticsData>();
+          }
+          allData.add(data);
+        }
+      }
+      return data;
     }
 
     /**
@@ -2650,7 +2654,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param newBytes the additional bytes read
      */
     public void incrementBytesRead(long newBytes) {
-      bytesRead.getAndAdd(newBytes);
+      getThreadData().bytesRead += newBytes;
     }
     
     /**
@@ -2658,7 +2662,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param newBytes the additional bytes written
      */
     public void incrementBytesWritten(long newBytes) {
-      bytesWritten.getAndAdd(newBytes);
+      getThreadData().bytesWritten += newBytes;
     }
     
     /**
@@ -2666,7 +2670,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param count number of read operations
      */
     public void incrementReadOps(int count) {
-      readOps.getAndAdd(count);
+      getThreadData().readOps += count;
     }
 
     /**
@@ -2674,7 +2678,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param count number of large read operations
      */
     public void incrementLargeReadOps(int count) {
-      largeReadOps.getAndAdd(count);
+      getThreadData().largeReadOps += count;
     }
 
     /**
@@ -2682,7 +2686,38 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param count number of write operations
      */
     public void incrementWriteOps(int count) {
-      writeOps.getAndAdd(count);
+      getThreadData().writeOps += count;
+    }
+
+    /**
+     * Apply the given aggregator to all StatisticsData objects associated with
+     * this Statistics object.
+     *
+     * For each StatisticsData object, we will call accept on the visitor.
+     * At the end, we will call aggregate to get the final total.
+     *
+     * @param visitor  The visitor to use.
+     * @return        The total.
+     */
+    private synchronized <T> T visitAll(StatisticsAggregator<T> visitor) {
+      visitor.accept(rootData);
+      if (allData != null) {
+        for (Iterator<StatisticsData> iter = allData.iterator();
+            iter.hasNext(); ) {
+          StatisticsData data = iter.next();
+          visitor.accept(data);
+          if (data.owner.get() == null) {
+            /*
+             * If the thread that created this thread-local data no
+             * longer exists, remove the StatisticsData from our list
+             * and fold the values into rootData.
+             */
+            rootData.add(data);
+            iter.remove();
+          }
+        }
+      }
+      return visitor.aggregate();
     }
 
     /**
@@ -2690,7 +2725,18 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @return the number of bytes
      */
     public long getBytesRead() {
-      return bytesRead.get();
+      return visitAll(new StatisticsAggregator<Long>() {
+        private long bytesRead = 0;
+
+        @Override
+        public void accept(StatisticsData data) {
+          bytesRead += data.bytesRead;
+        }
+
+        public Long aggregate() {
+          return bytesRead;
+        }
+      });
     }
     
     /**
@@ -2698,7 +2744,18 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @return the number of bytes
      */
     public long getBytesWritten() {
-      return bytesWritten.get();
+      return visitAll(new StatisticsAggregator<Long>() {
+        private long bytesWritten = 0;
+
+        @Override
+        public void accept(StatisticsData data) {
+          bytesWritten += data.bytesWritten;
+        }
+
+        public Long aggregate() {
+          return bytesWritten;
+        }
+      });
     }
     
     /**
@@ -2706,7 +2763,19 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @return number of read operations
      */
     public int getReadOps() {
-      return readOps.get() + largeReadOps.get();
+      return visitAll(new StatisticsAggregator<Integer>() {
+        private int readOps = 0;
+
+        @Override
+        public void accept(StatisticsData data) {
+          readOps += data.readOps;
+          readOps += data.largeReadOps;
+        }
+
+        public Integer aggregate() {
+          return readOps;
+        }
+      });
     }
 
     /**
@@ -2715,7 +2784,18 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @return number of large read operations
      */
     public int getLargeReadOps() {
-      return largeReadOps.get();
+      return visitAll(new StatisticsAggregator<Integer>() {
+        private int largeReadOps = 0;
+
+        @Override
+        public void accept(StatisticsData data) {
+          largeReadOps += data.largeReadOps;
+        }
+
+        public Integer aggregate() {
+          return largeReadOps;
+        }
+      });
     }
 
     /**
@@ -2724,22 +2804,70 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @return number of write operations
      */
     public int getWriteOps() {
-      return writeOps.get();
+      return visitAll(new StatisticsAggregator<Integer>() {
+        private int writeOps = 0;
+
+        @Override
+        public void accept(StatisticsData data) {
+          writeOps += data.writeOps;
+        }
+
+        public Integer aggregate() {
+          return writeOps;
+        }
+      });
     }
 
+
     @Override
     public String toString() {
-      return bytesRead + " bytes read, " + bytesWritten + " bytes written, "
-          + readOps + " read ops, " + largeReadOps + " large read ops, "
-          + writeOps + " write ops";
+      return visitAll(new StatisticsAggregator<String>() {
+        private StatisticsData total = new StatisticsData(null);
+
+        @Override
+        public void accept(StatisticsData data) {
+          total.add(data);
+        }
+
+        public String aggregate() {
+          return total.toString();
+        }
+      });
     }
-    
+
     /**
-     * Reset the counts of bytes to 0.
+     * Resets all statistics to 0.
+     *
+     * In order to reset, we add up all the thread-local statistics data, and
+     * set rootData to the negative of that.
+     *
+     * This may seem like a counterintuitive way to reset the statistics.  Why
+     * can't we just zero out all the thread-local data?  Well, thread-local
+     * data can only be modified by the thread that owns it.  If we tried to
+     * modify the thread-local data from this thread, our modification might get
+     * interleaved with a read-modify-write operation done by the thread that
+     * owns the data.  That would result in our update getting lost.
+     *
+     * The approach used here avoids this problem because it only ever reads
+     * (not writes) the thread-local data.  Both reads and writes to rootData
+     * are done under the lock, so we're free to modify rootData from any thread
+     * that holds the lock.
      */
     public void reset() {
-      bytesWritten.set(0);
-      bytesRead.set(0);
+      visitAll(new StatisticsAggregator<Void>() {
+        private StatisticsData total = new StatisticsData(null);
+
+        @Override
+        public void accept(StatisticsData data) {
+          total.add(data);
+        }
+
+        public Void aggregate() {
+          total.negate();
+          rootData.add(total);
+          return null;
+        }
+      });
     }
     
     /**
@@ -2807,19 +2935,19 @@ public abstract class FileSystem extends Configured implements Closeable {
                          ": " + pair.getValue());
     }
   }
-  
+
   // Symlinks are temporarily disabled - see HADOOP-10020 and HADOOP-10052
-  private static boolean symlinkEnabled = false;
-  
-  @Deprecated
+  private static boolean symlinksEnabled = false;
+
+  private static Configuration conf = null;
+
   @VisibleForTesting
-  public static boolean isSymlinksEnabled() {
-    return symlinkEnabled;
+  public static boolean areSymlinksEnabled() {
+    return symlinksEnabled;
   }
-  
-  @Deprecated
+
   @VisibleForTesting
   public static void enableSymlinks() {
-    symlinkEnabled = true;
+    symlinksEnabled = true;
   }
 }

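The observable behaviour of the Statistics accessors is unchanged by the thread-local rewrite above; an illustrative fragment using the pre-existing static accessors:

// each thread doing I/O updates only its own StatisticsData, so concurrent
// streams no longer contend on shared atomics; readers aggregate all threads
// (plus rootData) under the Statistics lock
for (FileSystem.Statistics stats : FileSystem.getAllStatistics()) {
  System.out.println(stats.getScheme()
      + ": " + stats.getBytesRead() + " bytes read, "
      + stats.getWriteOps() + " write ops");
}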
+ 3 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemLinkResolver.java

@@ -68,7 +68,6 @@ public abstract class FileSystemLinkResolver<T> {
    * @return Generic type determined by implementation
    * @throws IOException
    */
-  @SuppressWarnings("deprecation")
   public T resolve(final FileSystem filesys, final Path path)
       throws IOException {
     int count = 0;
@@ -88,9 +87,9 @@ public abstract class FileSystemLinkResolver<T> {
               + CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY
               + ").", e);
         }
-        if (!FileSystem.isSymlinksEnabled()) {
-          throw new IOException("Symlink resolution is disabled in"
-              + " this version of Hadoop.");
+        if (!FileSystem.areSymlinksEnabled()) {
+          throw new IOException("Symlink resolution is disabled in" +
+              " this version of Hadoop.");
         }
         if (count++ > FsConstants.MAX_PATH_LINKS) {
           throw new IOException("Possible cyclic loop while " +

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java

@@ -300,6 +300,9 @@ public class FsShell extends Configured implements Tool {
    */
   public static void main(String argv[]) throws Exception {
     FsShell shell = newShellInstance();
+    Configuration conf = new Configuration();
+    conf.setQuietMode(false);
+    shell.setConf(conf);
     int res;
     try {
       res = ToolRunner.run(shell, argv);

+ 270 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java

@@ -0,0 +1,270 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+class Globber {
+  public static final Log LOG = LogFactory.getLog(Globber.class.getName());
+
+  private final FileSystem fs;
+  private final FileContext fc;
+  private final Path pathPattern;
+  private final PathFilter filter;
+  
+  public Globber(FileSystem fs, Path pathPattern, PathFilter filter) {
+    this.fs = fs;
+    this.fc = null;
+    this.pathPattern = pathPattern;
+    this.filter = filter;
+  }
+
+  public Globber(FileContext fc, Path pathPattern, PathFilter filter) {
+    this.fs = null;
+    this.fc = fc;
+    this.pathPattern = pathPattern;
+    this.filter = filter;
+  }
+
+  private FileStatus getFileStatus(Path path) throws IOException {
+    try {
+      if (fs != null) {
+        return fs.getFileStatus(path);
+      } else {
+        return fc.getFileStatus(path);
+      }
+    } catch (FileNotFoundException e) {
+      return null;
+    }
+  }
+
+  private FileStatus[] listStatus(Path path) throws IOException {
+    try {
+      if (fs != null) {
+        return fs.listStatus(path);
+      } else {
+        return fc.util().listStatus(path);
+      }
+    } catch (FileNotFoundException e) {
+      return new FileStatus[0];
+    }
+  }
+
+  private Path fixRelativePart(Path path) {
+    if (fs != null) {
+      return fs.fixRelativePart(path);
+    } else {
+      return fc.fixRelativePart(path);
+    }
+  }
+
+  /**
+   * Convert a path component that contains backslash escape sequences to a
+   * literal string.  This is necessary when you want to explicitly refer to a
+   * path that contains globber metacharacters.
+   */
+  private static String unescapePathComponent(String name) {
+    return name.replaceAll("\\\\(.)", "$1");
+  }
+
+  /**
+   * Translate an absolute path into a list of path components.
+   * We merge double slashes into a single slash here.
+   * POSIX root path, i.e. '/', does not get an entry in the list.
+   */
+  private static List<String> getPathComponents(String path)
+      throws IOException {
+    ArrayList<String> ret = new ArrayList<String>();
+    for (String component : path.split(Path.SEPARATOR)) {
+      if (!component.isEmpty()) {
+        ret.add(component);
+      }
+    }
+    return ret;
+  }
+
+  private String schemeFromPath(Path path) throws IOException {
+    String scheme = path.toUri().getScheme();
+    if (scheme == null) {
+      if (fs != null) {
+        scheme = fs.getUri().getScheme();
+      } else {
+        scheme = fc.getDefaultFileSystem().getUri().getScheme();
+      }
+    }
+    return scheme;
+  }
+
+  private String authorityFromPath(Path path) throws IOException {
+    String authority = path.toUri().getAuthority();
+    if (authority == null) {
+      if (fs != null) {
+        authority = fs.getUri().getAuthority();
+      } else {
+        authority = fc.getDefaultFileSystem().getUri().getAuthority();
+      }
+    }
+    return authority;
+  }
+
+  public FileStatus[] glob() throws IOException {
+    // First we get the scheme and authority of the pattern that was passed
+    // in.
+    String scheme = schemeFromPath(pathPattern);
+    String authority = authorityFromPath(pathPattern);
+
+    // Next we strip off everything except the pathname itself, and expand all
+    // globs.  Expansion is a process which turns "grouping" clauses,
+    // expressed as brackets, into separate path patterns.
+    String pathPatternString = pathPattern.toUri().getPath();
+    List<String> flattenedPatterns = GlobExpander.expand(pathPatternString);
+
+    // Now loop over all flattened patterns.  In every case, we'll be trying to
+    // match them to entries in the filesystem.
+    ArrayList<FileStatus> results = 
+        new ArrayList<FileStatus>(flattenedPatterns.size());
+    boolean sawWildcard = false;
+    for (String flatPattern : flattenedPatterns) {
+      // Get the absolute path for this flattened pattern.  We couldn't do 
+      // this prior to flattening because of patterns like {/,a}, where the
+      // path you go down influences how the path must be made absolute.
+      Path absPattern = fixRelativePart(new Path(
+          flatPattern.isEmpty() ? Path.CUR_DIR : flatPattern));
+      // Now we break the flattened, absolute pattern into path components.
+      // For example, /a/*/c would be broken into the list [a, *, c]
+      List<String> components =
+          getPathComponents(absPattern.toUri().getPath());
+      // Starting out at the root of the filesystem, we try to match
+      // filesystem entries against pattern components.
+      ArrayList<FileStatus> candidates = new ArrayList<FileStatus>(1);
+      if (Path.WINDOWS && !components.isEmpty()
+          && Path.isWindowsAbsolutePath(absPattern.toUri().getPath(), true)) {
+        // On Windows the path could begin with a drive letter, e.g. /E:/foo.
+        // We will skip matching the drive letter and start from listing the
+        // root of the filesystem on that drive.
+        String driveLetter = components.remove(0);
+        candidates.add(new FileStatus(0, true, 0, 0, 0, new Path(scheme,
+            authority, Path.SEPARATOR + driveLetter + Path.SEPARATOR)));
+      } else {
+        candidates.add(new FileStatus(0, true, 0, 0, 0,
+            new Path(scheme, authority, Path.SEPARATOR)));
+      }
+      
+      for (int componentIdx = 0; componentIdx < components.size();
+          componentIdx++) {
+        ArrayList<FileStatus> newCandidates =
+            new ArrayList<FileStatus>(candidates.size());
+        GlobFilter globFilter = new GlobFilter(components.get(componentIdx));
+        String component = unescapePathComponent(components.get(componentIdx));
+        if (globFilter.hasPattern()) {
+          sawWildcard = true;
+        }
+        if (candidates.isEmpty() && sawWildcard) {
+          // Optimization: if there are no more candidates left, stop examining 
+          // the path components.  We can only do this if we've already seen
+          // a wildcard component-- otherwise, we still need to visit all path 
+          // components in case one of them is a wildcard.
+          break;
+        }
+        if ((componentIdx < components.size() - 1) &&
+            (!globFilter.hasPattern())) {
+          // Optimization: if this is not the terminal path component, and we 
+          // are not matching against a glob, assume that it exists.  If it 
+          // doesn't exist, we'll find out later when resolving a later glob
+          // or the terminal path component.
+          for (FileStatus candidate : candidates) {
+            candidate.setPath(new Path(candidate.getPath(), component));
+          }
+          continue;
+        }
+        for (FileStatus candidate : candidates) {
+          if (globFilter.hasPattern()) {
+            FileStatus[] children = listStatus(candidate.getPath());
+            if (children.length == 1) {
+              // If we get back only one result, this could be either a listing
+              // of a directory with one entry, or it could reflect the fact
+              // that what we listed resolved to a file.
+              //
+              // Unfortunately, we can't just compare the returned paths to
+              // figure this out.  Consider the case where you have /a/b, where
+              // b is a symlink to "..".  In that case, listing /a/b will give
+              // back "/a/b" again.  If we just went by returned pathname, we'd
+              // incorrectly conclude that /a/b was a file and should not match
+              // /a/*/*.  So we use getFileStatus of the path we just listed to
+              // disambiguate.
+              if (!getFileStatus(candidate.getPath()).isDirectory()) {
+                continue;
+              }
+            }
+            for (FileStatus child : children) {
+              // Set the child path based on the parent path.
+              child.setPath(new Path(candidate.getPath(),
+                      child.getPath().getName()));
+              if (globFilter.accept(child.getPath())) {
+                newCandidates.add(child);
+              }
+            }
+          } else {
+            // When dealing with non-glob components, use getFileStatus 
+            // instead of listStatus.  This is an optimization, but it also
+            // is necessary for correctness in HDFS, since there are some
+            // special HDFS directories like .reserved and .snapshot that are
+            // not visible to listStatus, but which do exist.  (See HADOOP-9877)
+            FileStatus childStatus = getFileStatus(
+                new Path(candidate.getPath(), component));
+            if (childStatus != null) {
+              newCandidates.add(childStatus);
+             }
+           }
+        }
+        candidates = newCandidates;
+      }
+      for (FileStatus status : candidates) {
+        // HADOOP-3497 semantics: the user-defined filter is applied at the
+        // end, once the full path is built up.
+        if (filter.accept(status.getPath())) {
+          results.add(status);
+        }
+      }
+    }
+    /*
+     * When the input pattern "looks" like just a simple filename, and we
+     * can't find it, we return null rather than an empty array.
+     * This is a special case which the shell relies on.
+     *
+     * To be more precise: if there were no results, AND there were no
+     * groupings (aka brackets), and no wildcards in the input (aka stars),
+     * we return null.
+     */
+    if ((!sawWildcard) && results.isEmpty() &&
+        (flattenedPatterns.size() <= 1)) {
+      return null;
+    }
+    return results.toArray(new FileStatus[0]);
+  }
+}

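The comments above spell out the matching algorithm: grouping clauses such as {a,b} are expanded into flat patterns, each flattened pattern is matched against the filesystem one path component at a time, and the user-supplied filter is applied only once full paths have been built. A minimal usage sketch of that path through FileSystem#globStatus follows; it is not part of this patch, and the /tmp locations and the underscore filter are illustrative assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class GlobStatusSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // The {logs,data} grouping is expanded first; each flattened pattern is
    // then matched component by component against the filesystem.
    FileStatus[] matches = fs.globStatus(new Path("/tmp/{logs,data}/*.txt"),
        new PathFilter() {
          @Override
          public boolean accept(Path p) {
            // HADOOP-3497 semantics: this user filter runs last, on full paths.
            return !p.getName().startsWith("_");
          }
        });
    if (matches == null) {
      // A plain filename with no wildcards or groupings that matched nothing.
      System.out.println("no such file");
    } else {
      for (FileStatus status : matches) {
        System.out.println(status.getPath());
      }
    }
  }
}
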
+ 53 - 44
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -17,21 +17,6 @@
  */
 package org.apache.hadoop.fs;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URLDecoder;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -41,6 +26,14 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.LineReader;
 import org.apache.hadoop.util.Progressable;
 
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URLDecoder;
+import java.util.*;
+
 /**
  * This is an implementation of the Hadoop Archive 
  * Filesystem. This archive Filesystem has index files
@@ -671,20 +664,15 @@ public class HarFileSystem extends FileSystem {
    */
   @Override
   public FileStatus getFileStatus(Path f) throws IOException {
-    Path p = makeQualified(f);
-    if (p.toUri().getPath().length() < archivePath.toString().length()) {
-      // still in the source file system
-      return fs.getFileStatus(new Path(p.toUri().getPath()));
-    }
-
-    HarStatus hstatus = getFileHarStatus(p);
+    HarStatus hstatus = getFileHarStatus(f);
     return toFileStatus(hstatus, null);
   }
 
   private HarStatus getFileHarStatus(Path f) throws IOException {
     // get the fs DataInputStream for the underlying file
     // look up the index.
-    Path harPath = getPathInHar(f);
+    Path p = makeQualified(f);
+    Path harPath = getPathInHar(p);
     if (harPath == null) {
       throw new IOException("Invalid file name: " + f + " in " + uri);
     }
@@ -801,11 +789,6 @@ public class HarFileSystem extends FileSystem {
     // to the client
     List<FileStatus> statuses = new ArrayList<FileStatus>();
     Path tmpPath = makeQualified(f);
-    if (tmpPath.toUri().getPath().length() < archivePath.toString().length()) {
-      // still in the source file system
-      return fs.listStatus(new Path(tmpPath.toUri().getPath()));
-    }
-    
     Path harPath = getPathInHar(tmpPath);
     HarStatus hstatus = metadata.archive.get(harPath);
     if (hstatus == null) {
@@ -919,11 +902,15 @@ public class HarFileSystem extends FileSystem {
       private long position, start, end;
       //The underlying data input stream that the
       // underlying filesystem will return.
-      private FSDataInputStream underLyingStream;
+      private final FSDataInputStream underLyingStream;
       //one byte buffer
-      private byte[] oneBytebuff = new byte[1];
+      private final byte[] oneBytebuff = new byte[1];
+      
       HarFsInputStream(FileSystem fs, Path path, long start,
           long length, int bufferSize) throws IOException {
+        if (length < 0) {
+          throw new IllegalArgumentException("Negative length ["+length+"]");
+        }
         underLyingStream = fs.open(path, bufferSize);
         underLyingStream.seek(start);
         // the start of this file in the part file
@@ -937,7 +924,7 @@ public class HarFileSystem extends FileSystem {
       @Override
       public synchronized int available() throws IOException {
         long remaining = end - underLyingStream.getPos();
-        if (remaining > (long)Integer.MAX_VALUE) {
+        if (remaining > Integer.MAX_VALUE) {
           return Integer.MAX_VALUE;
         }
         return (int) remaining;
@@ -969,10 +956,14 @@ public class HarFileSystem extends FileSystem {
         return (ret <= 0) ? -1: (oneBytebuff[0] & 0xff);
       }
       
+      // NB: currently this method is actually never executed because
+      // java.io.DataInputStream.read(byte[]) directly delegates to 
+      // method java.io.InputStream.read(byte[], int, int).
+      // However, potentially it can be invoked, so leave it intact for now.
       @Override
       public synchronized int read(byte[] b) throws IOException {
-        int ret = read(b, 0, b.length);
-        if (ret != -1) {
+        final int ret = read(b, 0, b.length);
+        if (ret > 0) {
           position += ret;
         }
         return ret;
@@ -1001,15 +992,19 @@ public class HarFileSystem extends FileSystem {
       public synchronized long skip(long n) throws IOException {
         long tmpN = n;
         if (tmpN > 0) {
-          if (position + tmpN > end) {
-            tmpN = end - position;
-          }
+          final long actualRemaining = end - position; 
+          if (tmpN > actualRemaining) {
+            tmpN = actualRemaining;
+          }   
           underLyingStream.seek(tmpN + position);
           position += tmpN;
           return tmpN;
-        }
-        return (tmpN < 0)? -1 : 0;
-      }
+        }   
+        // NB: the contract is described in java.io.InputStream.skip(long):
+        // this method returns the number of bytes actually skipped, so,
+        // the return value should never be negative. 
+        return 0;
+      }   
       
       @Override
       public synchronized long getPos() throws IOException {
@@ -1017,14 +1012,23 @@ public class HarFileSystem extends FileSystem {
       }
       
       @Override
-      public synchronized void seek(long pos) throws IOException {
-        if (pos < 0 || (start + pos > end)) {
-          throw new IOException("Failed to seek: EOF");
-        }
+      public synchronized void seek(final long pos) throws IOException {
+        validatePosition(pos);
         position = start + pos;
         underLyingStream.seek(position);
       }
 
+      private void validatePosition(final long pos) throws IOException {
+        if (pos < 0) {
+          throw new IOException("Negative position: "+pos);
+         }
+         final long length = end - start;
+         if (pos > length) {
+           throw new IOException("Position behind the end " +
+               "of the stream (length = "+length+"): " + pos);
+         }
+      }
+
       @Override
       public boolean seekToNewSource(long targetPos) throws IOException {
         // do not need to implement this
@@ -1041,7 +1045,12 @@ public class HarFileSystem extends FileSystem {
       throws IOException {
         int nlength = length;
         if (start + nlength + pos > end) {
-          nlength = (int) (end - (start + pos));
+          // length corrected to the real remaining length:
+          nlength = (int) (end - start - pos);
+        }
+        if (nlength <= 0) {
+          // EOS:
+          return -1;
         }
         return underLyingStream.read(pos + start , b, offset, nlength);
       }

+ 5 - 35
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java

@@ -41,15 +41,6 @@ import org.apache.hadoop.util.Shell;
  */
 public class HardLink { 
 
-  public enum OSType {
-    OS_TYPE_UNIX,
-    OS_TYPE_WIN,
-    OS_TYPE_SOLARIS,
-    OS_TYPE_MAC,
-    OS_TYPE_FREEBSD
-  }
-  
-  public static OSType osType;
   private static HardLinkCommandGetter getHardLinkCommand;
   
   public final LinkStats linkStats; //not static
@@ -57,19 +48,18 @@ public class HardLink {
   //initialize the command "getters" statically, so can use their 
   //methods without instantiating the HardLink object
   static { 
-    osType = getOSType();
-    if (osType == OSType.OS_TYPE_WIN) {
+    if (Shell.WINDOWS) {
       // Windows
       getHardLinkCommand = new HardLinkCGWin();
     } else {
-      // Unix
+      // Unix or Linux
       getHardLinkCommand = new HardLinkCGUnix();
       //override getLinkCountCommand for the particular Unix variant
       //Linux is already set as the default - {"stat","-c%h", null}
-      if (osType == OSType.OS_TYPE_MAC || osType == OSType.OS_TYPE_FREEBSD) {
+      if (Shell.MAC || Shell.FREEBSD) {
         String[] linkCountCmdTemplate = {"/usr/bin/stat","-f%l", null};
         HardLinkCGUnix.setLinkCountCmdTemplate(linkCountCmdTemplate);
-      } else if (osType == OSType.OS_TYPE_SOLARIS) {
+      } else if (Shell.SOLARIS) {
         String[] linkCountCmdTemplate = {"ls","-l", null};
         HardLinkCGUnix.setLinkCountCmdTemplate(linkCountCmdTemplate);        
       }
@@ -80,26 +70,6 @@ public class HardLink {
     linkStats = new LinkStats();
   }
   
-  static private OSType getOSType() {
-    String osName = System.getProperty("os.name");
-    if (Shell.WINDOWS) {
-      return OSType.OS_TYPE_WIN;
-    }
-    else if (osName.contains("SunOS") 
-            || osName.contains("Solaris")) {
-       return OSType.OS_TYPE_SOLARIS;
-    }
-    else if (osName.contains("Mac")) {
-       return OSType.OS_TYPE_MAC;
-    }
-    else if (osName.contains("FreeBSD")) {
-       return OSType.OS_TYPE_FREEBSD;
-    }
-    else {
-      return OSType.OS_TYPE_UNIX;
-    }
-  }
-  
   /**
    * This abstract class bridges the OS-dependent implementations of the 
    * needed functionality for creating hardlinks and querying link counts.
@@ -548,7 +518,7 @@ public class HardLink {
       if (inpMsg == null || exitValue != 0) {
         throw createIOException(fileName, inpMsg, errMsg, exitValue, null);
       }
-      if (osType == OSType.OS_TYPE_SOLARIS) {
+      if (Shell.SOLARIS) {
         String[] result = inpMsg.split("\\s+");
         return Integer.parseInt(result[1]);
       } else {

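The removed OSType enum and getOSType() probe are replaced by the static platform flags on Shell, as used above. A tiny sketch of those flags (output depends on the host OS and is shown only for illustration):

import org.apache.hadoop.util.Shell;

public class ShellFlagsSketch {
  public static void main(String[] args) {
    // These booleans take over the role of HardLink.OSType / getOSType().
    System.out.println("windows=" + Shell.WINDOWS
        + " mac=" + Shell.MAC
        + " solaris=" + Shell.SOLARIS
        + " freebsd=" + Shell.FREEBSD
        + " linux=" + Shell.LINUX);
  }
}
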
+ 79 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasEnhancedByteBufferAccess.java

@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.EnumSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.ByteBufferPool;
+
+/**
+ * FSDataInputStreams implement this interface to provide enhanced
+ * byte buffer access.  Usually this takes the form of mmap support.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface HasEnhancedByteBufferAccess {
+  /**
+   * Get a ByteBuffer containing file data.
+   *
+   * This ByteBuffer may come from the stream itself, via a call like mmap,
+   * or it may come from the ByteBufferFactory which is passed in as an
+   * argument.
+   *
+   * @param factory
+   *            If this is non-null, it will be used to create a fallback
+   *            ByteBuffer when the stream itself cannot create one.
+   * @param maxLength
+   *            The maximum length of buffer to return.  We may return a buffer
+   *            which is shorter than this.
+   * @param opts
+   *            Options to use when reading.
+   *
+   * @return
+   *            We will return null on EOF (and only on EOF).
+   *            Otherwise, we will return a direct ByteBuffer containing at
+   *            least one byte.  You must free this ByteBuffer when you are 
+   *            done with it by calling releaseBuffer on it.
+   *            The buffer will continue to be readable until it is released 
+   *            in this manner.  However, the input stream's close method may
+   *            warn about unclosed buffers.
+   * @throws
+   *            IOException: if there was an error reading.
+   *            UnsupportedOperationException: if factory was null, and we
+   *            needed an external byte buffer.  UnsupportedOperationException
+   *            will never be thrown unless the factory argument is null.
+   */
+  public ByteBuffer read(ByteBufferPool factory, int maxLength,
+      EnumSet<ReadOption> opts)
+          throws IOException, UnsupportedOperationException;
+
+  /**
+   * Release a ByteBuffer which was created by the enhanced ByteBuffer read
+   * function. You must not continue using the ByteBuffer after calling this 
+   * function.
+   *
+   * @param buffer
+   *            The ByteBuffer to release.
+   */
+  public void releaseBuffer(ByteBuffer buffer);
+}

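A hedged sketch of a caller of this interface: FSDataInputStream exposes these methods, and ElasticByteBufferPool from the same release line can serve as the fallback factory. The file path, the 1 MB read size, and the process() helper are assumptions, not part of the patch.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class EnhancedReadSketch {
  static void readAll(FileSystem fs) throws IOException {
    FSDataInputStream in = fs.open(new Path("/tmp/data.bin"));
    ByteBufferPool pool = new ElasticByteBufferPool();
    try {
      ByteBuffer buf;
      // null signals EOF; otherwise the buffer holds at least one byte.
      while ((buf = in.read(pool, 1 << 20,
          EnumSet.of(ReadOption.SKIP_CHECKSUMS))) != null) {
        try {
          process(buf);          // hypothetical consumer of the data
        } finally {
          in.releaseBuffer(buf); // every returned buffer must be released
        }
      }
    } finally {
      in.close();
    }
  }

  private static void process(ByteBuffer buf) {
    // placeholder: consume buf.remaining() bytes
  }
}
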
+ 32 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InvalidRequestException.java

@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+/**
+ * Thrown when the user makes a malformed request, for example missing required
+ * parameters or parameters that are not valid.
+ */
+public class InvalidRequestException extends IOException {
+  static final long serialVersionUID = 0L;
+
+  public InvalidRequestException(String str) {
+    super(str);
+  }
+}

+ 4 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java

@@ -362,6 +362,10 @@ public class LocalDirAllocator {
           totalAvailable += availableOnDisk[i];
         }
 
+        if (totalAvailable == 0){
+          throw new DiskErrorException("No space available in any of the local directories.");
+        }
+
         // Keep rolling the wheel till we get a valid path
         Random r = new java.util.Random();
         while (numDirsSearched < numDirs && returnPath == null) {

+ 24 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -18,9 +18,10 @@
 
 package org.apache.hadoop.fs;
 
-import java.io.*;
+import java.io.File;
+import java.io.IOException;
 import java.net.URI;
-import java.util.*;
+import java.util.Random;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -142,4 +143,25 @@ public class LocalFileSystem extends ChecksumFileSystem {
     }
     return false;
   }
+
+  @Override
+  public boolean supportsSymlinks() {
+    return true;
+  }
+
+  @Override
+  public void createSymlink(Path target, Path link, boolean createParent)
+      throws IOException {
+    fs.createSymlink(target, link, createParent);
+  }
+
+  @Override
+  public FileStatus getFileLinkStatus(final Path f) throws IOException {
+    return fs.getFileLinkStatus(f);
+  }
+
+  @Override
+  public Path getLinkTarget(Path f) throws IOException {
+    return fs.getLinkTarget(f);
+  }
 }

+ 25 - 17
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java

@@ -218,10 +218,13 @@ public class Path implements Comparable {
    */
   public static Path mergePaths(Path path1, Path path2) {
     String path2Str = path2.toUri().getPath();
-    if(hasWindowsDrive(path2Str)) {
-      path2Str = path2Str.substring(path2Str.indexOf(':')+1);
-    }
-    return new Path(path1 + path2Str);
+    path2Str = path2Str.substring(startPositionWithoutWindowsDrive(path2Str));
+    // Add path components explicitly, because simply concatenating two path
+    // strings is not safe, for example:
+    // "/" + "/foo" yields "//foo", which will be parsed as an authority in Path
+    return new Path(path1.toUri().getScheme(), 
+        path1.toUri().getAuthority(), 
+        path1.toUri().getPath() + path2Str);
   }
 
   /**
@@ -247,8 +250,8 @@ public class Path implements Comparable {
     }
     
     // trim trailing slash from non-root path (ignoring windows drive)
-    int minLength = hasWindowsDrive(path) ? 4 : 1;
-    if (path.length() > minLength && path.endsWith("/")) {
+    int minLength = startPositionWithoutWindowsDrive(path) + 1;
+    if (path.length() > minLength && path.endsWith(SEPARATOR)) {
       path = path.substring(0, path.length()-1);
     }
     
@@ -259,6 +262,14 @@ public class Path implements Comparable {
     return (WINDOWS && hasDriveLetterSpecifier.matcher(path).find());
   }
 
+  private static int startPositionWithoutWindowsDrive(String path) {
+    if (hasWindowsDrive(path)) {
+      return path.charAt(0) ==  SEPARATOR_CHAR ? 3 : 2;
+    } else {
+      return 0;
+    }
+  }
+  
   /**
    * Determine whether a given path string represents an absolute path on
    * Windows. e.g. "C:/a/b" is an absolute path. "C:a/b" is not.
@@ -270,13 +281,11 @@ public class Path implements Comparable {
    */
   public static boolean isWindowsAbsolutePath(final String pathString,
                                               final boolean slashed) {
-    int start = (slashed ? 1 : 0);
-
-    return
-        hasWindowsDrive(pathString) &&
-        pathString.length() >= (start + 3) &&
-        ((pathString.charAt(start + 2) == SEPARATOR_CHAR) ||
-          (pathString.charAt(start + 2) == '\\'));
+    int start = startPositionWithoutWindowsDrive(pathString);
+    return start > 0
+        && pathString.length() > start
+        && ((pathString.charAt(start) == SEPARATOR_CHAR) ||
+            (pathString.charAt(start) == '\\'));
   }
 
   /** Convert this to a URI. */
@@ -300,7 +309,7 @@ public class Path implements Comparable {
    *  True if the path component (i.e. directory) of this URI is absolute.
    */
   public boolean isUriPathAbsolute() {
-    int start = hasWindowsDrive(uri.getPath()) ? 3 : 0;
+    int start = startPositionWithoutWindowsDrive(uri.getPath());
     return uri.getPath().startsWith(SEPARATOR, start);
    }
   
@@ -334,7 +343,7 @@ public class Path implements Comparable {
   public Path getParent() {
     String path = uri.getPath();
     int lastSlash = path.lastIndexOf('/');
-    int start = hasWindowsDrive(path) ? 3 : 0;
+    int start = startPositionWithoutWindowsDrive(path);
     if ((path.length() == start) ||               // empty path
         (lastSlash == start && path.length() == start+1)) { // at root
       return null;
@@ -343,8 +352,7 @@ public class Path implements Comparable {
     if (lastSlash==-1) {
       parent = CUR_DIR;
     } else {
-      int end = hasWindowsDrive(path) ? 3 : 0;
-      parent = path.substring(0, lastSlash==end?end+1:lastSlash);
+      parent = path.substring(0, lastSlash==start?start+1:lastSlash);
     }
     return new Path(uri.getScheme(), uri.getAuthority(), parent);
   }

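An illustrative check, not part of the patch, of what the mergePaths comment describes: merging scheme, authority and path separately keeps a leading double slash from being reinterpreted as an authority.

import org.apache.hadoop.fs.Path;

public class MergePathsSketch {
  public static void main(String[] args) {
    // Old behaviour: new Path("/" + "/foo") == new Path("//foo"), which Path
    // parses as authority "foo" with an empty path. The component-wise merge
    // normalizes the double slash away instead.
    Path merged = Path.mergePaths(new Path("/"), new Path("/foo"));
    System.out.println(merged);  // expected: /foo
  }
}
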
+ 164 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -16,8 +16,11 @@
  * limitations under the License.
  */
 
+
 package org.apache.hadoop.fs;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import java.io.BufferedOutputStream;
 import java.io.DataOutput;
 import java.io.File;
@@ -51,6 +54,13 @@ import org.apache.hadoop.util.StringUtils;
 public class RawLocalFileSystem extends FileSystem {
   static final URI NAME = URI.create("file:///");
   private Path workingDir;
+  // Temporary workaround for HADOOP-9652.
+  private static boolean useDeprecatedFileStatus = true;
+
+  @VisibleForTesting
+  public static void useStatIfAvailable() {
+    useDeprecatedFileStatus = !Stat.isAvailable();
+  }
   
   public RawLocalFileSystem() {
     workingDir = getInitialWorkingDirectory();
@@ -352,8 +362,11 @@ public class RawLocalFileSystem extends FileSystem {
       throw new FileNotFoundException("File " + f + " does not exist");
     }
     if (localf.isFile()) {
+      if (!useDeprecatedFileStatus) {
+        return new FileStatus[] { getFileStatus(f) };
+      }
       return new FileStatus[] {
-        new RawLocalFileStatus(localf, getDefaultBlockSize(f), this) };
+        new DeprecatedRawLocalFileStatus(localf, getDefaultBlockSize(f), this)};
     }
 
     String[] names = localf.list();
@@ -432,7 +445,6 @@ public class RawLocalFileSystem extends FileSystem {
   public void setWorkingDirectory(Path newDir) {
     workingDir = makeAbsolute(newDir);
     checkPath(workingDir);
-    
   }
   
   @Override
@@ -486,15 +498,22 @@ public class RawLocalFileSystem extends FileSystem {
   
   @Override
   public FileStatus getFileStatus(Path f) throws IOException {
+    return getFileLinkStatusInternal(f, true);
+  }
+
+  @Deprecated
+  private FileStatus deprecatedGetFileStatus(Path f) throws IOException {
     File path = pathToFile(f);
     if (path.exists()) {
-      return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(f), this);
+      return new DeprecatedRawLocalFileStatus(pathToFile(f),
+          getDefaultBlockSize(f), this);
     } else {
       throw new FileNotFoundException("File " + f + " does not exist");
     }
   }
 
-  static class RawLocalFileStatus extends FileStatus {
+  @Deprecated
+  static class DeprecatedRawLocalFileStatus extends FileStatus {
     /* We can add extra fields here. It breaks at least CopyFiles.FilePair().
      * We recognize if the information is already loaded by checking if
      * owner.equals("").
@@ -503,7 +522,7 @@ public class RawLocalFileSystem extends FileSystem {
       return !super.getOwner().isEmpty(); 
     }
     
-    RawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) { 
+    DeprecatedRawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) {
       super(f.length(), f.isDirectory(), 1, defaultBlockSize,
           f.lastModified(), new Path(f.getPath()).makeQualified(fs.getUri(),
             fs.getWorkingDirectory()));
@@ -636,4 +655,144 @@ public class RawLocalFileSystem extends FileSystem {
     }
   }
 
+  @Override
+  public boolean supportsSymlinks() {
+    return true;
+  }
+
+  @SuppressWarnings("deprecation")
+  @Override
+  public void createSymlink(Path target, Path link, boolean createParent)
+      throws IOException {
+    if (!FileSystem.areSymlinksEnabled()) {
+      throw new UnsupportedOperationException("Symlinks not supported");
+    }
+    final String targetScheme = target.toUri().getScheme();
+    if (targetScheme != null && !"file".equals(targetScheme)) {
+      throw new IOException("Unable to create symlink to non-local file "+
+                            "system: "+target.toString());
+    }
+    if (createParent) {
+      mkdirs(link.getParent());
+    }
+
+    // NB: Use createSymbolicLink in java.nio.file.Path once available
+    int result = FileUtil.symLink(target.toString(),
+        makeAbsolute(link).toString());
+    if (result != 0) {
+      throw new IOException("Error " + result + " creating symlink " +
+          link + " to " + target);
+    }
+  }
+
+  /**
+   * Return a FileStatus representing the given path. If the path refers
+   * to a symlink return a FileStatus representing the link rather than
+   * the object the link refers to.
+   */
+  @Override
+  public FileStatus getFileLinkStatus(final Path f) throws IOException {
+    FileStatus fi = getFileLinkStatusInternal(f, false);
+    // getFileLinkStatus is supposed to return a symlink with a
+    // qualified path
+    if (fi.isSymlink()) {
+      Path targetQual = FSLinkResolver.qualifySymlinkTarget(this.getUri(),
+          fi.getPath(), fi.getSymlink());
+      fi.setSymlink(targetQual);
+    }
+    return fi;
+  }
+
+  /**
+   * Public {@link FileStatus} methods delegate to this function, which in turn
+   * either calls the new {@link Stat} based implementation or the deprecated
+   * methods based on platform support.
+   * 
+   * @param f Path to stat
+   * @param dereference whether to dereference the final path component if a
+   *          symlink
+   * @return FileStatus of f
+   * @throws IOException
+   */
+  private FileStatus getFileLinkStatusInternal(final Path f,
+      boolean dereference) throws IOException {
+    if (!useDeprecatedFileStatus) {
+      return getNativeFileLinkStatus(f, dereference);
+    } else if (dereference) {
+      return deprecatedGetFileStatus(f);
+    } else {
+      return deprecatedGetFileLinkStatusInternal(f);
+    }
+  }
+
+  /**
+   * Deprecated. Remains for legacy support. Should be removed when {@link Stat}
+   * gains support for Windows and other operating systems.
+   */
+  @Deprecated
+  private FileStatus deprecatedGetFileLinkStatusInternal(final Path f)
+      throws IOException {
+    String target = FileUtil.readLink(new File(f.toString()));
+
+    try {
+      FileStatus fs = getFileStatus(f);
+      // If f refers to a regular file or directory
+      if (target.isEmpty()) {
+        return fs;
+      }
+      // Otherwise f refers to a symlink
+      return new FileStatus(fs.getLen(),
+          false,
+          fs.getReplication(),
+          fs.getBlockSize(),
+          fs.getModificationTime(),
+          fs.getAccessTime(),
+          fs.getPermission(),
+          fs.getOwner(),
+          fs.getGroup(),
+          new Path(target),
+          f);
+    } catch (FileNotFoundException e) {
+      /* The exists method in the File class returns false for dangling
+       * links so we can get a FileNotFoundException for links that exist.
+       * It's also possible that we raced with a delete of the link. Use
+       * the readBasicFileAttributes method in java.nio.file.attributes
+       * when available.
+       */
+      if (!target.isEmpty()) {
+        return new FileStatus(0, false, 0, 0, 0, 0, FsPermission.getDefault(),
+            "", "", new Path(target), f);
+      }
+      // f refers to a file or directory that does not exist
+      throw e;
+    }
+  }
+  /**
+   * Calls out to platform's native stat(1) implementation to get file metadata
+   * (permissions, user, group, atime, mtime, etc). This works around the lack
+   * of lstat(2) in Java 6.
+   * 
+   *  Currently, the {@link Stat} class used to do this only supports Linux,
+   *  FreeBSD and Mac OS X, so the old
+   *  {@link #deprecatedGetFileLinkStatusInternal(Path)} implementation
+   *  (deprecated) remains until further OS support is added.
+   *
+   * @param f File to stat
+   * @param dereference whether to dereference symlinks
+   * @return FileStatus of f
+   * @throws IOException
+   */
+  private FileStatus getNativeFileLinkStatus(final Path f,
+      boolean dereference) throws IOException {
+    checkPath(f);
+    Stat stat = new Stat(f, getDefaultBlockSize(f), dereference, this);
+    FileStatus status = stat.getFileStatus();
+    return status;
+  }
+
+  @Override
+  public Path getLinkTarget(Path f) throws IOException {
+    FileStatus fi = getFileLinkStatusInternal(f, false);
+    // return an unqualified symlink target
+    return fi.getSymlink();
+  }
 }

+ 34 - 0
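A small sketch of the difference between the two status calls wired up here: getFileLinkStatus() describes the link itself while getFileStatus() follows it. The /tmp/alink path is assumed to be an existing local symlink; this is not part of the patch.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class LinkStatusSketch {
  public static void main(String[] args) throws Exception {
    RawLocalFileSystem raw = new RawLocalFileSystem();
    raw.initialize(URI.create("file:///"), new Configuration());
    Path link = new Path("/tmp/alink");                 // assumed symlink
    FileStatus linkStat = raw.getFileLinkStatus(link);  // the link itself
    FileStatus targetStat = raw.getFileStatus(link);    // what it points to
    System.out.println(linkStat.isSymlink() + " -> " + linkStat.getSymlink());
    System.out.println("target is a directory: " + targetStat.isDirectory());
  }
}
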
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ReadOption.java

@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Options that can be used when reading from a FileSystem.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public enum ReadOption {
+  /**
+   * Skip checksums when reading.  This option may be useful when reading a file
+   * format that has built-in checksums, or for testing purposes.
+   */
+  SKIP_CHECKSUMS,
+}

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Seekable.java

@@ -22,7 +22,9 @@ import java.io.*;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
-/** Stream that permits seeking. */
+/**
+ *  Stream that permits seeking.
+ */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public interface Seekable {

+ 173 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java

@@ -0,0 +1,173 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.StringTokenizer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Wrapper for the Unix stat(1) command. Used to work around the lack of 
+ * lstat(2) in Java 6.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+public class Stat extends Shell {
+
+  private final Path original;
+  private final Path qualified;
+  private final Path path;
+  private final long blockSize;
+  private final boolean dereference;
+
+  private FileStatus stat;
+
+  public Stat(Path path, long blockSize, boolean deref, FileSystem fs)
+      throws IOException {
+    super(0L, true);
+    // Original path
+    this.original = path;
+    // Qualify the original and strip out URI fragment via toUri().getPath()
+    Path stripped = new Path(
+        original.makeQualified(fs.getUri(), fs.getWorkingDirectory())
+        .toUri().getPath());
+    // Re-qualify the bare stripped path and store it
+    this.qualified = 
+        stripped.makeQualified(fs.getUri(), fs.getWorkingDirectory());
+    // Strip back down to a plain path
+    this.path = new Path(qualified.toUri().getPath());
+    this.blockSize = blockSize;
+    this.dereference = deref;
+    // LANG = C setting
+    Map<String, String> env = new HashMap<String, String>();
+    env.put("LANG", "C");
+    setEnvironment(env);
+  }
+
+  public FileStatus getFileStatus() throws IOException {
+    run();
+    return stat;
+  }
+
+  /**
+   * Whether Stat is supported on the current platform.
+   * @return true if the stat(1) wrapper is available on this platform
+   */
+  public static boolean isAvailable() {
+    if (Shell.LINUX || Shell.FREEBSD || Shell.MAC) {
+      return true;
+    }
+    return false;
+  }
+
+  @VisibleForTesting
+  FileStatus getFileStatusForTesting() {
+    return stat;
+  }
+
+  @Override
+  protected String[] getExecString() {
+    String derefFlag = "-";
+    if (dereference) {
+      derefFlag = "-L";
+    }
+    if (Shell.LINUX) {
+      return new String[] {
+          "stat", derefFlag + "c", "%s,%F,%Y,%X,%a,%U,%G,%N", path.toString() };
+    } else if (Shell.FREEBSD || Shell.MAC) {
+      return new String[] {
+          "stat", derefFlag + "f", "%z,%HT,%m,%a,%Op,%Su,%Sg,`link' -> `%Y'",
+          path.toString() };
+    } else {
+      throw new UnsupportedOperationException(
+          "stat is not supported on this platform");
+    }
+  }
+
+  @Override
+  protected void parseExecResult(BufferedReader lines) throws IOException {
+    // Reset stat
+    stat = null;
+
+    String line = lines.readLine();
+    if (line == null) {
+      throw new IOException("Unable to stat path: " + original);
+    }
+    if (line.endsWith("No such file or directory") ||
+        line.endsWith("Not a directory")) {
+      throw new FileNotFoundException("File " + original + " does not exist");
+    }
+    if (line.endsWith("Too many levels of symbolic links")) {
+      throw new IOException("Possible cyclic loop while following symbolic" +
+          " link " + original);
+    }
+    // 6,symbolic link,6,1373584236,1373584236,lrwxrwxrwx,andrew,andrew,`link' -> `target'
+    StringTokenizer tokens = new StringTokenizer(line, ",");
+    try {
+      long length = Long.parseLong(tokens.nextToken());
+      boolean isDir = tokens.nextToken().equalsIgnoreCase("directory") ? true
+          : false;
+      // Convert from seconds to milliseconds
+      long modTime = Long.parseLong(tokens.nextToken())*1000;
+      long accessTime = Long.parseLong(tokens.nextToken())*1000;
+      String octalPerms = tokens.nextToken();
+      // FreeBSD has extra digits beyond 4, truncate them
+      if (octalPerms.length() > 4) {
+        int len = octalPerms.length();
+        octalPerms = octalPerms.substring(len-4, len);
+      }
+      FsPermission perms = new FsPermission(Short.parseShort(octalPerms, 8));
+      String owner = tokens.nextToken();
+      String group = tokens.nextToken();
+      String symStr = tokens.nextToken();
+      // 'notalink'
+      // 'link' -> `target'
+      // '' -> ''
+      Path symlink = null;
+      StringTokenizer symTokens = new StringTokenizer(symStr, "`");
+      symTokens.nextToken();
+      try {
+        String target = symTokens.nextToken();
+        target = target.substring(0, target.length()-1);
+        if (!target.isEmpty()) {
+          symlink = new Path(target);
+        }
+      } catch (NoSuchElementException e) {
+        // null if not a symlink
+      }
+      // Set stat
+      stat = new FileStatus(length, isDir, 1, blockSize, modTime, accessTime,
+          perms, owner, group, symlink, qualified);
+    } catch (NumberFormatException e) {
+      throw new IOException("Unexpected stat output: " + line, e);
+    } catch (NoSuchElementException e) {
+      throw new IOException("Unexpected stat output: " + line, e);
+    }
+  }
+}

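Stat is LimitedPrivate and is normally reached only through RawLocalFileSystem, but a direct sketch helps show the contract. The path and block size below are assumptions; the guard mirrors isAvailable() above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Stat;

public class StatSketch {
  public static void main(String[] args) throws Exception {
    if (!Stat.isAvailable()) {
      System.out.println("stat(1) wrapper unsupported on this platform");
      return;
    }
    FileSystem raw = FileSystem.getLocal(new Configuration()).getRawFileSystem();
    // deref=false: describe the link itself rather than its target.
    Stat stat = new Stat(new Path("/tmp/alink"), 4096L, false, raw);
    FileStatus st = stat.getFileStatus();
    System.out.println(st.isSymlink()
        ? st.getPath() + " -> " + st.getSymlink()
        : st.getPath().toString());
  }
}
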
+ 36 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyUnavailableException.java

@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+public class ZeroCopyUnavailableException extends IOException {
+  private static final long serialVersionUID = 0L;
+
+  public ZeroCopyUnavailableException(String message) {
+    super(message);
+  }
+
+  public ZeroCopyUnavailableException(String message, Exception e) {
+    super(message, e);
+  }
+
+  public ZeroCopyUnavailableException(Exception e) {
+    super(e);
+  }
+}

+ 5 - 89
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.fs.local;
 
-import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -28,13 +26,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.DelegateToFileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.fs.permission.FsPermission;
 
 /**
  * The RawLocalFs implementation of AbstractFileSystem.
@@ -72,90 +66,12 @@ public class RawLocalFs extends DelegateToFileSystem {
   public FsServerDefaults getServerDefaults() throws IOException {
     return LocalConfigKeys.getServerDefaults();
   }
-  
-  @Override
-  public boolean supportsSymlinks() {
-    return true;
-  }
-
-  @Override
-  public void createSymlink(Path target, Path link, boolean createParent)
-      throws IOException {
-    final String targetScheme = target.toUri().getScheme();
-    if (targetScheme != null && !"file".equals(targetScheme)) {
-      throw new IOException("Unable to create symlink to non-local file "+
-          "system: "+target.toString());
-    }
-
-    if (createParent) {
-      mkdir(link.getParent(), FsPermission.getDirDefault(), true);
-    }
-
-    // NB: Use createSymbolicLink in java.nio.file.Path once available
-    int result = FileUtil.symLink(target.toString(), link.toString());
-    if (result != 0) {
-      throw new IOException("Error " + result + " creating symlink " +
-          link + " to " + target);
-    }
-  }
 
-  /**
-   * Return a FileStatus representing the given path. If the path refers 
-   * to a symlink return a FileStatus representing the link rather than
-   * the object the link refers to.
-   */
-  @Override
-  public FileStatus getFileLinkStatus(final Path f) throws IOException {
-    String target = FileUtil.readLink(new File(f.toString()));
-    try {
-      FileStatus fs = getFileStatus(f);
-      // If f refers to a regular file or directory      
-      if (target.isEmpty()) {
-        return fs;
-      }
-      // Otherwise f refers to a symlink
-      return new FileStatus(fs.getLen(), 
-          false,
-          fs.getReplication(), 
-          fs.getBlockSize(),
-          fs.getModificationTime(),
-          fs.getAccessTime(),
-          fs.getPermission(),
-          fs.getOwner(),
-          fs.getGroup(),
-          new Path(target),
-          f);
-    } catch (FileNotFoundException e) {
-      /* The exists method in the File class returns false for dangling 
-       * links so we can get a FileNotFoundException for links that exist.
-       * It's also possible that we raced with a delete of the link. Use
-       * the readBasicFileAttributes method in java.nio.file.attributes 
-       * when available.
-       */
-      if (!target.isEmpty()) {
-        return new FileStatus(0, false, 0, 0, 0, 0, FsPermission.getDefault(), 
-            "", "", new Path(target), f);        
-      }
-      // f refers to a file or directory that does not exist
-      throw e;
-    }
-  }
-  
-   @Override
-   public boolean isValidName(String src) {
-     // Different local file systems have different validation rules.  Skip
-     // validation here and just let the OS handle it.  This is consistent with
-     // RawLocalFileSystem.
-     return true;
-   }
-  
   @Override
-  public Path getLinkTarget(Path f) throws IOException {
-    /* We should never get here. Valid local links are resolved transparently
-     * by the underlying local file system and accessing a dangling link will 
-     * result in an IOException, not an UnresolvedLinkException, so FileContext
-     * should never call this function.
-     */
-    throw new AssertionError();
+  public boolean isValidName(String src) {
+    // Different local file systems have different validation rules. Skip
+    // validation here and just let the OS handle it. This is consistent with
+    // RawLocalFileSystem.
+    return true;
   }
 }

+ 10 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java

@@ -120,8 +120,7 @@ public class FsPermission implements Writable {
   }
 
   public void fromShort(short n) {
-    FsAction[] v = FsAction.values();
-
+    FsAction[] v = FSACTION_VALUES;
     set(v[(n >>> 6) & 7], v[(n >>> 3) & 7], v[n & 7], (((n >>> 9) & 1) == 1) );
   }
 
@@ -210,6 +209,8 @@ public class FsPermission implements Writable {
   public static final int DEFAULT_UMASK = 
                   CommonConfigurationKeys.FS_PERMISSIONS_UMASK_DEFAULT;
 
+  private static final FsAction[] FSACTION_VALUES = FsAction.values();
+
   /** 
    * Get the user file creation mask (umask)
    * 
@@ -303,6 +304,13 @@ public class FsPermission implements Writable {
     return new FsPermission((short)00666);
   }
 
+  /**
+   * Get the default permission for cache pools.
+   */
+  public static FsPermission getCachePoolDefault() {
+    return new FsPermission((short)00755);
+  }
+
   /**
    * Create a FsPermission from a Unix symbolic permission string
    * @param unixSymbolicPermission e.g. "-rw-rw-rw-"

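A brief illustration of the two additions above: fromShort() now reuses the cached FsAction values, and getCachePoolDefault() yields 0755. The expected outputs in the comments are for illustration only.

import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionSketch {
  public static void main(String[] args) {
    FsPermission pool = FsPermission.getCachePoolDefault();
    System.out.println(pool);                    // rwxr-xr-x
    System.out.println(pool.toShort() == 0755);  // true

    FsPermission p = new FsPermission((short) 0);
    p.fromShort((short) 0644);                   // uses the cached FsAction[] values
    System.out.println(p);                       // rw-r--r--
  }
}
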
+ 15 - 11
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java

@@ -106,10 +106,12 @@ public class PathData implements Comparable<PathData> {
 
   /**
    * Validates the given Windows path.
-   * Throws IOException on failure.
   * @param pathString a String of the path supplied by the user.
+   * @return true if the URI scheme was not present in the pathString but
+   * inferred; false, otherwise.
+   * @throws IOException if anything goes wrong
    */
-  private void ValidateWindowsPath(String pathString)
+  private static boolean checkIfSchemeInferredFromPath(String pathString)
   throws IOException
   {
     if (windowsNonUriAbsolutePath1.matcher(pathString).find()) {
@@ -118,23 +120,21 @@ public class PathData implements Comparable<PathData> {
         throw new IOException("Invalid path string " + pathString);
       }
 
-      inferredSchemeFromPath = true;
-      return;
+      return true;
     }
 
     // Is it a forward slash-separated absolute path?
     if (windowsNonUriAbsolutePath2.matcher(pathString).find()) {
-      inferredSchemeFromPath = true;
-      return;
+      return true;
     }
 
     // Does it look like a URI? If so then just leave it alone.
     if (potentialUri.matcher(pathString).find()) {
-      return;
+      return false;
     }
 
     // Looks like a relative path on Windows.
-    return;
+    return false;
   }
 
   /**
@@ -153,7 +153,7 @@ public class PathData implements Comparable<PathData> {
     setStat(stat);
 
     if (Path.WINDOWS) {
-      ValidateWindowsPath(pathString);
+      inferredSchemeFromPath = checkIfSchemeInferredFromPath(pathString);
     }
   }
 
@@ -302,7 +302,7 @@ public class PathData implements Comparable<PathData> {
     // check getPath() so scheme slashes aren't considered part of the path
     String separator = uri.getPath().endsWith(Path.SEPARATOR)
         ? "" : Path.SEPARATOR;
-    return uri + separator + basename;
+    return uriToString(uri, inferredSchemeFromPath) + separator + basename;
   }
   
   protected enum PathType { HAS_SCHEME, SCHEMELESS_ABSOLUTE, RELATIVE };
@@ -356,7 +356,7 @@ public class PathData implements Comparable<PathData> {
             if (globUri.getAuthority() == null) {
               matchUri = removeAuthority(matchUri);
             }
-            globMatch = matchUri.toString();
+            globMatch = uriToString(matchUri, false);
             break;
           case SCHEMELESS_ABSOLUTE: // take just the uri's path
             globMatch = matchUri.getPath();
@@ -438,6 +438,10 @@ public class PathData implements Comparable<PathData> {
    */
   @Override
   public String toString() {
+    return uriToString(uri, inferredSchemeFromPath);
+  }
+ 
+  private static String uriToString(URI uri, boolean inferredSchemeFromPath) {
     String scheme = uri.getScheme();
     // No interpretation of symbols. Just decode % escaped chars.
     String decodedRemainder = uri.getSchemeSpecificPart();

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -766,8 +766,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     zkClient = getNewZooKeeper();
     LOG.debug("Created new connection for " + this);
   }
-  
-  void terminateConnection() {
+
+  @InterfaceAudience.Private
+  public synchronized void terminateConnection() {
     if (zkClient == null) {
       return;
     }

+ 12 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java

@@ -63,7 +63,7 @@ public abstract class HAAdmin extends Configured implements Tool {
 
   private int rpcTimeoutForChecks = -1;
   
-  private static Map<String, UsageInfo> USAGE =
+  protected final static Map<String, UsageInfo> USAGE =
     ImmutableMap.<String, UsageInfo>builder()
     .put("-transitionToActive",
         new UsageInfo("<serviceId>", "Transitions the service into Active state"))
@@ -91,6 +91,14 @@ public abstract class HAAdmin extends Configured implements Tool {
   protected PrintStream out = System.out;
   private RequestSource requestSource = RequestSource.REQUEST_BY_USER;
 
+  protected HAAdmin() {
+    super();
+  }
+
+  protected HAAdmin(Configuration conf) {
+    super(conf);
+  }
+
   protected abstract HAServiceTarget resolveTarget(String string);
 
   protected String getUsageString() {
@@ -461,9 +469,9 @@ public abstract class HAAdmin extends Configured implements Tool {
     return 0;
   }
   
-  private static class UsageInfo {
-    private final String args;
-    private final String help;
+  protected static class UsageInfo {
+    public final String args;
+    public final String help;
     
     public UsageInfo(String args, String help) {
       this.args = args;

+ 13 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java

@@ -31,15 +31,25 @@ public class HttpConfig {
   private static Policy policy;
   public enum Policy {
     HTTP_ONLY,
-    HTTPS_ONLY;
+    HTTPS_ONLY,
+    HTTP_AND_HTTPS;
 
     public static Policy fromString(String value) {
-      if (value.equalsIgnoreCase(CommonConfigurationKeysPublic
-              .HTTP_POLICY_HTTPS_ONLY)) {
+      if (HTTPS_ONLY.name().equalsIgnoreCase(value)) {
         return HTTPS_ONLY;
+      } else if (HTTP_AND_HTTPS.name().equalsIgnoreCase(value)) {
+        return HTTP_AND_HTTPS;
       }
       return HTTP_ONLY;
     }
+
+    public boolean isHttpEnabled() {
+      return this == HTTP_ONLY || this == HTTP_AND_HTTPS;
+    }
+
+    public boolean isHttpsEnabled() {
+      return this == HTTPS_ONLY || this == HTTP_AND_HTTPS;
+    }
   }
 
   static {

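A quick sketch of the new tri-state policy; the literal string here stands in for a value that would normally come from configuration.

import org.apache.hadoop.http.HttpConfig;

public class PolicySketch {
  public static void main(String[] args) {
    HttpConfig.Policy policy = HttpConfig.Policy.fromString("HTTP_AND_HTTPS");
    if (policy.isHttpEnabled()) {
      System.out.println("start the plain HTTP connector");
    }
    if (policy.isHttpsEnabled()) {
      System.out.println("start the SSL connector");
    }
  }
}
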
+ 94 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java

@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.http;
+
+import java.util.HashMap;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogConfigurationException;
+import org.apache.commons.logging.LogFactory;
+import org.apache.log4j.Appender;
+import org.apache.log4j.Logger;
+import org.mortbay.jetty.NCSARequestLog;
+import org.mortbay.jetty.RequestLog;
+
+/**
+ * RequestLog object for use with Http
+ */
+public class HttpRequestLog {
+
+  public static final Log LOG = LogFactory.getLog(HttpRequestLog.class);
+  private static final HashMap<String, String> serverToComponent;
+
+  static {
+    serverToComponent = new HashMap<String, String>();
+    serverToComponent.put("cluster", "resourcemanager");
+    serverToComponent.put("hdfs", "namenode");
+    serverToComponent.put("node", "nodemanager");
+  }
+
+  public static RequestLog getRequestLog(String name) {
+
+    String lookup = serverToComponent.get(name);
+    if (lookup != null) {
+      name = lookup;
+    }
+    String loggerName = "http.requests." + name;
+    String appenderName = name + "requestlog";
+    Log logger = LogFactory.getLog(loggerName);
+
+    if (logger instanceof Log4JLogger) {
+      Log4JLogger httpLog4JLog = (Log4JLogger)logger;
+      Logger httpLogger = httpLog4JLog.getLogger();
+      Appender appender = null;
+
+      try {
+        appender = httpLogger.getAppender(appenderName);
+      } catch (LogConfigurationException e) {
+        LOG.warn("Http request log for " + loggerName
+            + " could not be created");
+        throw e;
+      }
+
+      if (appender == null) {
+        LOG.info("Http request log for " + loggerName
+            + " is not defined");
+        return null;
+      }
+
+      if (appender instanceof HttpRequestLogAppender) {
+        HttpRequestLogAppender requestLogAppender
+          = (HttpRequestLogAppender)appender;
+        NCSARequestLog requestLog = new NCSARequestLog();
+        requestLog.setFilename(requestLogAppender.getFilename());
+        requestLog.setRetainDays(requestLogAppender.getRetainDays());
+        return requestLog;
+      }
+      else {
+        LOG.warn("Jetty request log for " + loggerName
+            + " was of the wrong class");
+        return null;
+      }
+    }
+    else {
+      LOG.warn("Jetty request log can only be enabled using Log4j");
+      return null;
+    }
+  }
+}

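A hedged sketch of how the returned RequestLog could be attached to Jetty, in line with the RequestLogHandler import the HttpServer changes below add. The "hdfs" server name maps to the "namenode" component per the table above; the appender itself must be defined in log4j.properties for getRequestLog to return non-null.

import org.apache.hadoop.http.HttpRequestLog;
import org.mortbay.jetty.RequestLog;
import org.mortbay.jetty.handler.HandlerCollection;
import org.mortbay.jetty.handler.RequestLogHandler;

public class RequestLogWiringSketch {
  static void addRequestLog(HandlerCollection handlers) {
    // Null unless log4j defines an HttpRequestLogAppender named
    // "namenoderequestlog" on the "http.requests.namenode" logger.
    RequestLog requestLog = HttpRequestLog.getRequestLog("hdfs");
    if (requestLog != null) {
      RequestLogHandler logHandler = new RequestLogHandler();
      logHandler.setRequestLog(requestLog);
      handlers.addHandler(logHandler);
    }
  }
}
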
+ 62 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLogAppender.java

@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.http;
+
+import org.apache.log4j.spi.LoggingEvent;
+import org.apache.log4j.AppenderSkeleton;
+
+/**
+ * Log4j Appender adapter for HttpRequestLog
+ */
+public class HttpRequestLogAppender extends AppenderSkeleton {
+
+  private String filename;
+  private int retainDays;
+
+  public HttpRequestLogAppender() {
+  }
+
+  public void setRetainDays(int retainDays) {
+    this.retainDays = retainDays;
+  }
+
+  public int getRetainDays() {
+    return retainDays;
+  }
+
+  public void setFilename(String filename) {
+    this.filename = filename;
+  }
+
+  public String getFilename() {
+    return filename;
+  }
+
+  @Override
+  public void append(LoggingEvent event) {
+  }
+
+  @Override
+  public void close() {
+  }
+
+  @Override
+  public boolean requiresLayout() {
+    return false;
+  }
+}

+ 403 - 177
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -19,11 +19,13 @@ package org.apache.hadoop.http;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.io.PrintWriter;
 import java.net.BindException;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.net.URL;
-import java.security.GeneralSecurityException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Enumeration;
@@ -31,7 +33,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import javax.net.ssl.SSLServerSocketFactory;
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
@@ -46,6 +47,7 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.ConfServlet;
@@ -58,16 +60,18 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
-import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Shell;
 import org.mortbay.io.Buffer;
 import org.mortbay.jetty.Connector;
 import org.mortbay.jetty.Handler;
 import org.mortbay.jetty.MimeTypes;
+import org.mortbay.jetty.RequestLog;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.handler.ContextHandler;
 import org.mortbay.jetty.handler.ContextHandlerCollection;
+import org.mortbay.jetty.handler.HandlerCollection;
+import org.mortbay.jetty.handler.RequestLogHandler;
 import org.mortbay.jetty.nio.SelectChannelConnector;
 import org.mortbay.jetty.security.SslSocketConnector;
 import org.mortbay.jetty.servlet.Context;
@@ -80,6 +84,8 @@ import org.mortbay.jetty.webapp.WebAppContext;
 import org.mortbay.thread.QueuedThreadPool;
 import org.mortbay.util.MultiException;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import com.sun.jersey.spi.container.servlet.ServletContainer;
 
 /**
@@ -108,28 +114,264 @@ public class HttpServer implements FilterContainer {
 
   public static final String BIND_ADDRESS = "bind.address";
 
-  private AccessControlList adminsAcl;
+  private final AccessControlList adminsAcl;
 
-  private SSLFactory sslFactory;
   protected final Server webServer;
-  protected final Connector listener;
+
+  private static class ListenerInfo {
+    /**
+     * Boolean flag to determine whether the HTTP server should clean up the
+     * listener in stop().
+     */
+    private final boolean isManaged;
+    private final Connector listener;
+    private ListenerInfo(boolean isManaged, Connector listener) {
+      this.isManaged = isManaged;
+      this.listener = listener;
+    }
+  }
+
+  private final List<ListenerInfo> listeners = Lists.newArrayList();
+
   protected final WebAppContext webAppContext;
   protected final boolean findPort;
   protected final Map<Context, Boolean> defaultContexts =
       new HashMap<Context, Boolean>();
   protected final List<String> filterNames = new ArrayList<String>();
-  private static final int MAX_RETRIES = 10;
   static final String STATE_DESCRIPTION_ALIVE = " - alive";
   static final String STATE_DESCRIPTION_NOT_LIVE = " - not live";
 
-  private final boolean listenerStartedExternally;
+  /**
+   * Class to construct instances of HTTP server with specific options.
+   */
+  public static class Builder {
+    private ArrayList<URI> endpoints = Lists.newArrayList();
+    private Connector connector;
+    private String name;
+    private Configuration conf;
+    private String[] pathSpecs;
+    private AccessControlList adminsAcl;
+    private boolean securityEnabled = false;
+    private String usernameConfKey;
+    private String keytabConfKey;
+    private boolean needsClientAuth;
+    private String trustStore;
+    private String trustStorePassword;
+    private String trustStoreType;
+
+    private String keyStore;
+    private String keyStorePassword;
+    private String keyStoreType;
+
+    // The -keypass option in keytool
+    private String keyPassword;
+
+    @Deprecated
+    private String bindAddress;
+    @Deprecated
+    private int port = -1;
+
+    private boolean findPort;
+
+    private String hostName;
+
+    public Builder setName(String name){
+      this.name = name;
+      return this;
+    }
+
+    /**
+     * Add an endpoint that the HTTP server should listen to.
+     *
+     * @param endpoint
+     *          the endpoint that the HTTP server should listen to. The
+     *          scheme specifies the protocol (i.e. HTTP / HTTPS), the host
+     *          specifies the binding address, and the port specifies the
+     *          listening port. An unspecified or zero port means that the
+     *          server can listen on any free port.
+     */
+    public Builder addEndpoint(URI endpoint) {
+      endpoints.add(endpoint);
+      return this;
+    }
+
+    /**
+     * Set the hostname of the http server. The host name is used to resolve the
+     * _HOST field in Kerberos principals. The hostname of the first listener
+     * will be used if the name is unspecified.
+     */
+    public Builder hostName(String hostName) {
+      this.hostName = hostName;
+      return this;
+    }
+    
+    public Builder trustStore(String location, String password, String type) {
+      this.trustStore = location;
+      this.trustStorePassword = password;
+      this.trustStoreType = type;
+      return this;
+    }
+
+    public Builder keyStore(String location, String password, String type) {
+      this.keyStore = location;
+      this.keyStorePassword = password;
+      this.keyStoreType = type;
+      return this;
+    }
+
+    public Builder keyPassword(String password) {
+      this.keyPassword = password;
+      return this;
+    }
+
+    /**
+     * Specify whether the server should require client certificate
+     * authentication for SSL connections.
+     */
+    public Builder needsClientAuth(boolean value) {
+      this.needsClientAuth = value;
+      return this;
+    }
+
+    /**
+     * Use addEndpoint() instead.
+     */
+    @Deprecated
+    public Builder setBindAddress(String bindAddress){
+      this.bindAddress = bindAddress;
+      return this;
+    }
+
+    /**
+     * Use addEndpoint() instead.
+     */
+    @Deprecated
+    public Builder setPort(int port) {
+      this.port = port;
+      return this;
+    }
+    
+    public Builder setFindPort(boolean findPort) {
+      this.findPort = findPort;
+      return this;
+    }
+    
+    public Builder setConf(Configuration conf) {
+      this.conf = conf;
+      return this;
+    }
+    
+    public Builder setConnector(Connector connector) {
+      this.connector = connector;
+      return this;
+    }
+    
+    public Builder setPathSpec(String[] pathSpec) {
+      this.pathSpecs = pathSpec;
+      return this;
+    }
+    
+    public Builder setACL(AccessControlList acl) {
+      this.adminsAcl = acl;
+      return this;
+    }
+    
+    public Builder setSecurityEnabled(boolean securityEnabled) {
+      this.securityEnabled = securityEnabled;
+      return this;
+    }
+    
+    public Builder setUsernameConfKey(String usernameConfKey) {
+      this.usernameConfKey = usernameConfKey;
+      return this;
+    }
+    
+    public Builder setKeytabConfKey(String keytabConfKey) {
+      this.keytabConfKey = keytabConfKey;
+      return this;
+    }
+    
+    public HttpServer build() throws IOException {
+      if (this.name == null) {
+        throw new HadoopIllegalArgumentException("name is not set");
+      }
+
+      // Make the behavior compatible with deprecated interfaces
+      if (bindAddress != null && port != -1) {
+        try {
+          endpoints.add(0, new URI("http", "", bindAddress, port, "", "", ""));
+        } catch (URISyntaxException e) {
+          throw new HadoopIllegalArgumentException("Invalid endpoint: "+ e);
+        }
+      }
+
+      if (endpoints.size() == 0 && connector == null) {
+        throw new HadoopIllegalArgumentException("No endpoints specified");
+      }
+
+      if (hostName == null) {
+        hostName = endpoints.size() == 0 ? connector.getHost() : endpoints.get(
+            0).getHost();
+      }
+      
+      if (this.conf == null) {
+        conf = new Configuration();
+      }
+      
+      HttpServer server = new HttpServer(this);
+
+      if (this.securityEnabled) {
+        server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
+      }
+
+      if (connector != null) {
+        server.addUnmanagedListener(connector);
+      }
+
+      for (URI ep : endpoints) {
+        Connector listener = null;
+        String scheme = ep.getScheme();
+        if ("http".equals(scheme)) {
+          listener = HttpServer.createDefaultChannelConnector();
+        } else if ("https".equals(scheme)) {
+          SslSocketConnector c = new SslSocketConnector();
+          c.setNeedClientAuth(needsClientAuth);
+          c.setKeyPassword(keyPassword);
+
+          if (keyStore != null) {
+            c.setKeystore(keyStore);
+            c.setKeystoreType(keyStoreType);
+            c.setPassword(keyStorePassword);
+          }
+
+          if (trustStore != null) {
+            c.setTruststore(trustStore);
+            c.setTruststoreType(trustStoreType);
+            c.setTrustPassword(trustStorePassword);
+          }
+          listener = c;
+
+        } else {
+          throw new HadoopIllegalArgumentException(
+              "unknown scheme for endpoint:" + ep);
+        }
+        listener.setHost(ep.getHost());
+        listener.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
+        server.addManagedListener(listener);
+      }
+      server.loadListeners();
+      return server;
+    }
+  }
   
   /** Same as this(name, bindAddress, port, findPort, null); */
+  @Deprecated
   public HttpServer(String name, String bindAddress, int port, boolean findPort
       ) throws IOException {
     this(name, bindAddress, port, findPort, new Configuration());
   }
 
+  @Deprecated
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf, Connector connector) throws IOException {
     this(name, bindAddress, port, findPort, conf, null, connector, null);
@@ -149,6 +391,7 @@ public class HttpServer implements FilterContainer {
    * @param pathSpecs Path specifications that this httpserver will be serving. 
    *        These will be added to any filters.
    */
+  @Deprecated
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf, String[] pathSpecs) throws IOException {
     this(name, bindAddress, port, findPort, conf, null, null, pathSpecs);
@@ -163,11 +406,13 @@ public class HttpServer implements FilterContainer {
    *        increment by 1 until it finds a free port.
    * @param conf Configuration 
    */
+  @Deprecated
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf) throws IOException {
     this(name, bindAddress, port, findPort, conf, null, null, null);
   }
 
+  @Deprecated
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf, AccessControlList adminsAcl) 
       throws IOException {
@@ -185,6 +430,7 @@ public class HttpServer implements FilterContainer {
    * @param conf Configuration 
    * @param adminsAcl {@link AccessControlList} of the admins
    */
+  @Deprecated
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf, AccessControlList adminsAcl, 
       Connector connector) throws IOException {
@@ -205,71 +451,64 @@ public class HttpServer implements FilterContainer {
    * @param pathSpecs Path specifications that this httpserver will be serving. 
    *        These will be added to any filters.
    */
+  @Deprecated
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf, AccessControlList adminsAcl, 
       Connector connector, String[] pathSpecs) throws IOException {
-    webServer = new Server();
-    this.findPort = findPort;
-    this.adminsAcl = adminsAcl;
-    
-    if(connector == null) {
-      listenerStartedExternally = false;
-      if (HttpConfig.isSecure()) {
-        sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
-        try {
-          sslFactory.init();
-        } catch (GeneralSecurityException ex) {
-          throw new IOException(ex);
-        }
-        SslSocketConnector sslListener = new SslSocketConnector() {
-          @Override
-          protected SSLServerSocketFactory createFactory() throws Exception {
-            return sslFactory.createSSLServerSocketFactory();
-          }
-        };
-        listener = sslListener;
-      } else {
-        listener = createBaseListener(conf);
-      }
-      listener.setHost(bindAddress);
-      listener.setPort(port);
-      LOG.info("SSL is enabled on " + toString());
-    } else {
-      listenerStartedExternally = true;
-      listener = connector;
-    }
-    
-    webServer.addConnector(listener);
+    this(new Builder().setName(name).hostName(bindAddress)
+        .addEndpoint(URI.create("http://" + bindAddress + ":" + port))
+        .setFindPort(findPort).setConf(conf).setACL(adminsAcl)
+        .setConnector(connector).setPathSpec(pathSpecs));
+  }
+
+  private HttpServer(final Builder b) throws IOException {
+    final String appDir = getWebAppsPath(b.name);
+    this.webServer = new Server();
+    this.adminsAcl = b.adminsAcl;
+    this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir);
+    this.findPort = b.findPort;
+    initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs);
+  }
+
+  private void initializeWebServer(String name, String hostName,
+      Configuration conf, String[] pathSpecs)
+      throws FileNotFoundException, IOException {
+
+    Preconditions.checkNotNull(webAppContext);
 
     int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1);
     // If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the
     // default value (currently 250).
-    QueuedThreadPool threadPool = maxThreads == -1 ?
-        new QueuedThreadPool() : new QueuedThreadPool(maxThreads);
+    QueuedThreadPool threadPool = maxThreads == -1 ? new QueuedThreadPool()
+        : new QueuedThreadPool(maxThreads);
     threadPool.setDaemon(true);
     webServer.setThreadPool(threadPool);
 
-    final String appDir = getWebAppsPath(name);
     ContextHandlerCollection contexts = new ContextHandlerCollection();
-    webServer.setHandler(contexts);
-
-    webAppContext = new WebAppContext();
-    webAppContext.setDisplayName(name);
-    webAppContext.setContextPath("/");
-    webAppContext.setWar(appDir + "/" + name);
-    webAppContext.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
-    webAppContext.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
-    addNoCacheFilter(webAppContext);
+    RequestLog requestLog = HttpRequestLog.getRequestLog(name);
+
+    if (requestLog != null) {
+      RequestLogHandler requestLogHandler = new RequestLogHandler();
+      requestLogHandler.setRequestLog(requestLog);
+      HandlerCollection handlers = new HandlerCollection();
+      handlers.setHandlers(new Handler[] { requestLogHandler, contexts });
+      webServer.setHandler(handlers);
+    } else {
+      webServer.setHandler(contexts);
+    }
+
+    final String appDir = getWebAppsPath(name);
+
     webServer.addHandler(webAppContext);
 
     addDefaultApps(contexts, appDir, conf);
-        
+
     addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
-    final FilterInitializer[] initializers = getFilterInitializers(conf); 
+    final FilterInitializer[] initializers = getFilterInitializers(conf);
     if (initializers != null) {
       conf = new Configuration(conf);
-      conf.set(BIND_ADDRESS, bindAddress);
-      for(FilterInitializer c : initializers) {
+      conf.set(BIND_ADDRESS, hostName);
+      for (FilterInitializer c : initializers) {
         c.initFilter(this, conf);
       }
     }
@@ -284,10 +523,29 @@ public class HttpServer implements FilterContainer {
     }
   }
 
-  @SuppressWarnings("unchecked")
-  private void addNoCacheFilter(WebAppContext ctxt) {
-    defineFilter(ctxt, NO_CACHE_FILTER,
-      NoCacheFilter.class.getName(), Collections.EMPTY_MAP, new String[] { "/*"});
+  private void addUnmanagedListener(Connector connector) {
+    listeners.add(new ListenerInfo(false, connector));
+  }
+
+  private void addManagedListener(Connector connector) {
+    listeners.add(new ListenerInfo(true, connector));
+  }
+
+  private static WebAppContext createWebAppContext(String name,
+      Configuration conf, AccessControlList adminsAcl, final String appDir) {
+    WebAppContext ctx = new WebAppContext();
+    ctx.setDisplayName(name);
+    ctx.setContextPath("/");
+    ctx.setWar(appDir + "/" + name);
+    ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
+    ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
+    addNoCacheFilter(ctx);
+    return ctx;
+  }
+
+  private static void addNoCacheFilter(WebAppContext ctxt) {
+    defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(),
+        Collections.<String, String> emptyMap(), new String[] { "/*" });
   }
 
   /**
@@ -352,7 +610,9 @@ public class HttpServer implements FilterContainer {
       if (conf.getBoolean(
           CommonConfigurationKeys.HADOOP_JETTY_LOGS_SERVE_ALIASES,
           CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
-        logContext.getInitParams().put(
+        @SuppressWarnings("unchecked")
+        Map<String, String> params = logContext.getInitParams();
+        params.put(
             "org.mortbay.jetty.servlet.Default.aliases", "true");
       }
       logContext.setDisplayName("logs");
@@ -529,7 +789,7 @@ public class HttpServer implements FilterContainer {
   /**
    * Define a filter for a context and set up default url mappings.
    */
-  public void defineFilter(Context ctx, String name,
+  public static void defineFilter(Context ctx, String name,
       String classname, Map<String,String> parameters, String[] urls) {
 
     FilterHolder holder = new FilterHolder();
@@ -569,7 +829,7 @@ public class HttpServer implements FilterContainer {
   public Object getAttribute(String name) {
     return webAppContext.getAttribute(name);
   }
-
+  
   public WebAppContext getWebAppContext(){
     return this.webAppContext;
   }
@@ -593,80 +853,47 @@ public class HttpServer implements FilterContainer {
    * Get the port that the server is on
    * @return the port
    */
+  @Deprecated
   public int getPort() {
     return webServer.getConnectors()[0].getLocalPort();
   }
 
   /**
-   * Set the min, max number of worker threads (simultaneous connections).
+   * Get the address that corresponds to a particular connector.
+   *
+   * @return the corresponding address for the connector, or null if there's no
+   *         such connector or the connector is not bound.
    */
-  public void setThreads(int min, int max) {
-    QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool() ;
-    pool.setMinThreads(min);
-    pool.setMaxThreads(max);
-  }
+  public InetSocketAddress getConnectorAddress(int index) {
+    Preconditions.checkArgument(index >= 0);
+    if (index >= webServer.getConnectors().length)
+      return null;
 
-  /**
-   * Configure an ssl listener on the server.
-   * @param addr address to listen on
-   * @param keystore location of the keystore
-   * @param storPass password for the keystore
-   * @param keyPass password for the key
-   * @deprecated Use {@link #addSslListener(InetSocketAddress, Configuration, boolean)}
-   */
-  @Deprecated
-  public void addSslListener(InetSocketAddress addr, String keystore,
-      String storPass, String keyPass) throws IOException {
-    if (webServer.isStarted()) {
-      throw new IOException("Failed to add ssl listener");
+    Connector c = webServer.getConnectors()[index];
+    if (c.getLocalPort() == -1) {
+      // The connector is not bound
+      return null;
     }
-    SslSocketConnector sslListener = new SslSocketConnector();
-    sslListener.setHost(addr.getHostName());
-    sslListener.setPort(addr.getPort());
-    sslListener.setKeystore(keystore);
-    sslListener.setPassword(storPass);
-    sslListener.setKeyPassword(keyPass);
-    webServer.addConnector(sslListener);
+
+    return new InetSocketAddress(c.getHost(), c.getLocalPort());
   }
 
   /**
-   * Configure an ssl listener on the server.
-   * @param addr address to listen on
-   * @param sslConf conf to retrieve ssl options
-   * @param needCertsAuth whether x509 certificate authentication is required
+   * Set the min, max number of worker threads (simultaneous connections).
    */
-  public void addSslListener(InetSocketAddress addr, Configuration sslConf,
-      boolean needCertsAuth) throws IOException {
-    if (webServer.isStarted()) {
-      throw new IOException("Failed to add ssl listener");
-    }
-    if (needCertsAuth) {
-      // setting up SSL truststore for authenticating clients
-      System.setProperty("javax.net.ssl.trustStore", sslConf.get(
-          "ssl.server.truststore.location", ""));
-      System.setProperty("javax.net.ssl.trustStorePassword", sslConf.get(
-          "ssl.server.truststore.password", ""));
-      System.setProperty("javax.net.ssl.trustStoreType", sslConf.get(
-          "ssl.server.truststore.type", "jks"));
-    }
-    SslSocketConnector sslListener = new SslSocketConnector();
-    sslListener.setHost(addr.getHostName());
-    sslListener.setPort(addr.getPort());
-    sslListener.setKeystore(sslConf.get("ssl.server.keystore.location"));
-    sslListener.setPassword(sslConf.get("ssl.server.keystore.password", ""));
-    sslListener.setKeyPassword(sslConf.get("ssl.server.keystore.keypassword", ""));
-    sslListener.setKeystoreType(sslConf.get("ssl.server.keystore.type", "jks"));
-    sslListener.setNeedClientAuth(needCertsAuth);
-    webServer.addConnector(sslListener);
+  public void setThreads(int min, int max) {
+    QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool();
+    pool.setMinThreads(min);
+    pool.setMaxThreads(max);
   }
-  
-  protected void initSpnego(Configuration conf,
+
+  private void initSpnego(Configuration conf, String hostName,
       String usernameConfKey, String keytabConfKey) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
     String principalInConf = conf.get(usernameConfKey);
     if (principalInConf != null && !principalInConf.isEmpty()) {
-      params.put("kerberos.principal",
-                 SecurityUtil.getServerPrincipal(principalInConf, listener.getHost()));
+      params.put("kerberos.principal", SecurityUtil.getServerPrincipal(
+          principalInConf, hostName));
     }
     String httpKeytab = conf.get(keytabConfKey);
     if (httpKeytab != null && !httpKeytab.isEmpty()) {
@@ -684,8 +911,7 @@ public class HttpServer implements FilterContainer {
   public void start() throws IOException {
     try {
       try {
-        openListener();
-        LOG.info("Jetty bound to port " + listener.getLocalPort());
+        openListeners();
         webServer.start();
       } catch (IOException ex) {
         LOG.info("HttpServer.start() threw a non Bind IOException", ex);
@@ -718,73 +944,65 @@ public class HttpServer implements FilterContainer {
     }
   }
 
+  private void loadListeners() {
+    for (ListenerInfo li : listeners) {
+      webServer.addConnector(li.listener);
+    }
+  }
+
   /**
    * Open the main listener for the server
    * @throws Exception
    */
-  void openListener() throws Exception {
-    if (listener.getLocalPort() != -1) { // it's already bound
-      return;
-    }
-    if (listenerStartedExternally) { // Expect that listener was started securely
-      throw new Exception("Expected webserver's listener to be started " +
-          "previously but wasn't");
-    }
-    int port = listener.getPort();
-    while (true) {
-      // jetty has a bug where you can't reopen a listener that previously
-      // failed to open w/o issuing a close first, even if the port is changed
-      try {
-        listener.close();
-        listener.open();
-        break;
-      } catch (BindException ex) {
-        if (port == 0 || !findPort) {
-          BindException be = new BindException(
-              "Port in use: " + listener.getHost() + ":" + listener.getPort());
-          be.initCause(ex);
-          throw be;
+  void openListeners() throws Exception {
+    for (ListenerInfo li : listeners) {
+      Connector listener = li.listener;
+      if (!li.isManaged || li.listener.getLocalPort() != -1) {
+        // This listener is either started externally or has been bound
+        continue;
+      }
+      int port = listener.getPort();
+      while (true) {
+        // jetty has a bug where you can't reopen a listener that previously
+        // failed to open w/o issuing a close first, even if the port is changed
+        try {
+          listener.close();
+          listener.open();
+          LOG.info("Jetty bound to port " + listener.getLocalPort());
+          break;
+        } catch (BindException ex) {
+          if (port == 0 || !findPort) {
+            BindException be = new BindException("Port in use: "
+                + listener.getHost() + ":" + listener.getPort());
+            be.initCause(ex);
+            throw be;
+          }
         }
+        // try the next port number
+        listener.setPort(++port);
+        Thread.sleep(100);
       }
-      // try the next port number
-      listener.setPort(++port);
-      Thread.sleep(100);
     }
   }
   
-  /**
-   * Return the bind address of the listener.
-   * @return InetSocketAddress of the listener
-   */
-  public InetSocketAddress getListenerAddress() {
-    int port = listener.getLocalPort();
-    if (port == -1) { // not bound, return requested port
-      port = listener.getPort();
-    }
-    return new InetSocketAddress(listener.getHost(), port);
-  }
-  
   /**
    * stop the server
    */
   public void stop() throws Exception {
     MultiException exception = null;
-    try {
-      listener.close();
-    } catch (Exception e) {
-      LOG.error("Error while stopping listener for webapp"
-          + webAppContext.getDisplayName(), e);
-      exception = addMultiException(exception, e);
-    }
+    for (ListenerInfo li : listeners) {
+      if (!li.isManaged) {
+        continue;
+      }
 
-    try {
-      if (sslFactory != null) {
-          sslFactory.destroy();
+      try {
+        li.listener.close();
+      } catch (Exception e) {
+        LOG.error(
+            "Error while stopping listener for webapp"
+                + webAppContext.getDisplayName(), e);
+        exception = addMultiException(exception, e);
       }
-    } catch (Exception e) {
-      LOG.error("Error while destroying the SSLFactory"
-          + webAppContext.getDisplayName(), e);
-      exception = addMultiException(exception, e);
     }
 
     try {
@@ -796,6 +1014,7 @@ public class HttpServer implements FilterContainer {
           + webAppContext.getDisplayName(), e);
       exception = addMultiException(exception, e);
     }
+
     try {
       webServer.stop();
     } catch (Exception e) {
@@ -836,10 +1055,17 @@ public class HttpServer implements FilterContainer {
    */
   @Override
   public String toString() {
-    return listener != null ?
-        ("HttpServer at http://" + listener.getHost() + ":" + listener.getLocalPort() + "/"
-            + (isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE))
-        : "Inactive HttpServer";
+    if (listeners.size() == 0) {
+      return "Inactive HttpServer";
+    } else {
+      StringBuilder sb = new StringBuilder("HttpServer (")
+        .append(isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE).append("), listening at:");
+      for (ListenerInfo li : listeners) {
+        Connector l = li.listener;
+        sb.append(l.getHost()).append(":").append(l.getPort()).append("/,");
+      }
+      return sb.toString();
+    }
   }
 
   /**

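A minimal sketch of the new Builder-based construction path that replaces the deprecated constructors. The server name, endpoint and configuration are hypothetical, build() still expects a matching webapps/<name> resource to be present (see getWebAppsPath), and error handling is omitted.

    import java.net.InetSocketAddress;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer;

    public class HttpServerBuilderSketch {
      public static void main(String[] args) throws Exception {
        // Port 0 requests an ephemeral port; with setFindPort(true) the server
        // walks forward from the requested port if it hits a BindException.
        HttpServer server = new HttpServer.Builder()
            .setName("test")                               // hypothetical webapp name
            .addEndpoint(URI.create("http://localhost:0"))
            .setFindPort(true)
            .setConf(new Configuration())
            .build();

        server.start();
        // getConnectorAddress(int) replaces the removed getListenerAddress().
        InetSocketAddress addr = server.getConnectorAddress(0);
        System.out.println("Jetty bound to " + addr);
        server.stop();
      }
    }
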
+ 48 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java

@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public interface ByteBufferPool {
+  /**
+   * Get a new ByteBuffer.  The pool can provide this by removing a
+   * buffer from its internal cache, or by allocating a
+   * new buffer.
+   *
+   * @param direct     Whether the buffer should be direct.
+   * @param length     The minimum capacity the buffer should have.
+   * @return           A new ByteBuffer.  It is direct if and only if a
+   *                   direct buffer was requested, and its capacity is at
+   *                   least the requested length.
+   */
+  ByteBuffer getBuffer(boolean direct, int length);
+
+  /**
+   * Release a buffer back to the pool.
+   * The pool may choose to put this buffer into its cache.
+   *
+   * @param buffer    the buffer being released back to the pool
+   */
+  void putBuffer(ByteBuffer buffer);
+}

+ 118 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java

@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io;
+
+import com.google.common.collect.ComparisonChain;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+
+import java.nio.ByteBuffer;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This is a simple ByteBufferPool which just creates ByteBuffers as needed.
+ * It also caches ByteBuffers after they're released.  It will always return
+ * the smallest cached buffer with at least the capacity you request.
+ * We don't try to do anything clever here, such as limiting the maximum
+ * cache size.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public final class ElasticByteBufferPool implements ByteBufferPool {
+  private static final class Key implements Comparable<Key> {
+    private final int capacity;
+    private final long insertionTime;
+
+    Key(int capacity, long insertionTime) {
+      this.capacity = capacity;
+      this.insertionTime = insertionTime;
+    }
+
+    @Override
+    public int compareTo(Key other) {
+      return ComparisonChain.start().
+          compare(capacity, other.capacity).
+          compare(insertionTime, other.insertionTime).
+          result();
+    }
+
+    @Override
+    public boolean equals(Object rhs) {
+      if (rhs == null) {
+        return false;
+      }
+      try {
+        Key o = (Key)rhs;
+        return (compareTo(o) == 0);
+      } catch (ClassCastException e) {
+        return false;
+      }
+    }
+
+    @Override
+    public int hashCode() {
+      return new HashCodeBuilder().
+          append(capacity).
+          append(insertionTime).
+          toHashCode();
+    }
+  }
+
+  private final TreeMap<Key, ByteBuffer> buffers =
+      new TreeMap<Key, ByteBuffer>();
+
+  private final TreeMap<Key, ByteBuffer> directBuffers =
+      new TreeMap<Key, ByteBuffer>();
+
+  private final TreeMap<Key, ByteBuffer> getBufferTree(boolean direct) {
+    return direct ? directBuffers : buffers;
+  }
+  
+  @Override
+  public synchronized ByteBuffer getBuffer(boolean direct, int length) {
+    TreeMap<Key, ByteBuffer> tree = getBufferTree(direct);
+    Map.Entry<Key, ByteBuffer> entry =
+        tree.ceilingEntry(new Key(length, 0));
+    if (entry == null) {
+      return direct ? ByteBuffer.allocateDirect(length) :
+                      ByteBuffer.allocate(length);
+    }
+    tree.remove(entry.getKey());
+    return entry.getValue();
+  }
+
+  @Override
+  public synchronized void putBuffer(ByteBuffer buffer) {
+    TreeMap<Key, ByteBuffer> tree = getBufferTree(buffer.isDirect());
+    while (true) {
+      Key key = new Key(buffer.capacity(), System.nanoTime());
+      if (!tree.containsKey(key)) {
+        tree.put(key, buffer);
+        return;
+      }
+      // Buffers are indexed by (capacity, time).
+      // If our key is not unique on the first try, we try again, since the
+      // time will be different.  Since we use nanoseconds, it's pretty
+      // unlikely that we'll loop even once, unless the system clock has a
+      // poor granularity.
+    }
+  }
+}
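
A short sketch of the buffer lifecycle against this pool; the sizes are arbitrary. It only exercises behaviour visible in the code above: a cache miss allocates a fresh buffer, and a later request is served by the smallest cached buffer whose capacity is at least the requested length.

    import java.nio.ByteBuffer;

    import org.apache.hadoop.io.ByteBufferPool;
    import org.apache.hadoop.io.ElasticByteBufferPool;

    public class BufferPoolSketch {
      public static void main(String[] args) {
        ByteBufferPool pool = new ElasticByteBufferPool();

        // Cache miss: a fresh direct buffer of exactly the requested size.
        ByteBuffer first = pool.getBuffer(true, 1024);
        pool.putBuffer(first);   // cached, keyed by (capacity, insertion time)

        // Cache hit: a request for 512 bytes is served by the cached
        // 1024-byte buffer, since 1024 >= 512.
        ByteBuffer reused = pool.getBuffer(true, 512);
        System.out.println(reused.capacity());   // prints 1024
      }
    }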

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java

@@ -203,8 +203,8 @@ public class ReadaheadPool {
       // It's also possible that we'll end up requesting readahead on some
       // other FD, which may be wasted work, but won't cause a problem.
       try {
-        NativeIO.POSIX.posixFadviseIfPossible(identifier, fd, off, len,
-            NativeIO.POSIX.POSIX_FADV_WILLNEED);
+        NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(identifier,
+            fd, off, len, NativeIO.POSIX.POSIX_FADV_WILLNEED);
       } catch (IOException ioe) {
         if (canceled) {
           // no big deal - the reader canceled the request and closed

+ 1 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java

@@ -454,10 +454,7 @@ public class Text extends BinaryComparable
   /** Read a UTF8 encoded string from in
    */
   public static String readString(DataInput in) throws IOException {
-    int length = WritableUtils.readVInt(in);
-    byte [] bytes = new byte[length];
-    in.readFully(bytes, 0, length);
-    return decode(bytes);
+    return readString(in, Integer.MAX_VALUE);
   }
   
   /** Read a UTF8 encoded string with a maximum size

+ 6 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableFactories.java

@@ -22,25 +22,26 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.util.ReflectionUtils;
-import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 
 /** Factories for non-public writables.  Defining a factory permits {@link
  * ObjectWritable} to be able to construct instances of non-public classes. */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class WritableFactories {
-  private static final HashMap<Class, WritableFactory> CLASS_TO_FACTORY =
-    new HashMap<Class, WritableFactory>();
+  private static final Map<Class, WritableFactory> CLASS_TO_FACTORY =
+    new ConcurrentHashMap<Class, WritableFactory>();
 
   private WritableFactories() {}                  // singleton
 
   /** Define a factory for a class. */
-  public static synchronized void setFactory(Class c, WritableFactory factory) {
+  public static void setFactory(Class c, WritableFactory factory) {
     CLASS_TO_FACTORY.put(c, factory);
   }
 
   /** Define a factory for a class. */
-  public static synchronized WritableFactory getFactory(Class c) {
+  public static WritableFactory getFactory(Class c) {
     return CLASS_TO_FACTORY.get(c);
   }
 

+ 16 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java

@@ -85,16 +85,15 @@ public class CodecPool {
     T codec = null;
     
     // Check if an appropriate codec is available
+    List<T> codecList;
     synchronized (pool) {
-      if (pool.containsKey(codecClass)) {
-        List<T> codecList = pool.get(codecClass);
-        
-        if (codecList != null) {
-          synchronized (codecList) {
-            if (!codecList.isEmpty()) {
-              codec = codecList.remove(codecList.size()-1);
-            }
-          }
+      codecList = pool.get(codecClass);
+    }
+
+    if (codecList != null) {
+      synchronized (codecList) {
+        if (!codecList.isEmpty()) {
+          codec = codecList.remove(codecList.size() - 1);
         }
       }
     }
@@ -105,15 +104,17 @@ public class CodecPool {
   private static <T> void payback(Map<Class<T>, List<T>> pool, T codec) {
     if (codec != null) {
       Class<T> codecClass = ReflectionUtils.getClass(codec);
+      List<T> codecList;
       synchronized (pool) {
-        if (!pool.containsKey(codecClass)) {
-          pool.put(codecClass, new ArrayList<T>());
+        codecList = pool.get(codecClass);
+        if (codecList == null) {
+          codecList = new ArrayList<T>();
+          pool.put(codecClass, codecList);
         }
+      }
 
-        List<T> codecList = pool.get(codecClass);
-        synchronized (codecList) {
-          codecList.add(codec);
-        }
+      synchronized (codecList) {
+        codecList.add(codec);
       }
     }
   }

+ 11 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java

@@ -28,11 +28,12 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
 
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class DefaultCodec implements Configurable, CompressionCodec {
+public class DefaultCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
   private static final Log LOG = LogFactory.getLog(DefaultCodec.class);
   
   Configuration conf;
@@ -103,6 +104,15 @@ public class DefaultCodec implements Configurable, CompressionCodec {
     return ZlibFactory.getZlibDecompressor(conf);
   }
   
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public DirectDecompressor createDirectDecompressor() {
+    return ZlibFactory.getZlibDirectDecompressor(conf);
+  }
+  
+  
   @Override
   public String getDefaultExtension() {
     return ".deflate";

+ 35 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DirectDecompressionCodec.java

@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.compress;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This class encapsulates a codec which can decompress direct bytebuffers.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface DirectDecompressionCodec extends CompressionCodec {
+  /**
+   * Create a new {@link DirectDecompressor} for use by this {@link DirectDecompressionCodec}.
+   * 
+   * @return a new direct decompressor for use by this codec
+   */
+  DirectDecompressor createDirectDecompressor();
+}

+ 59 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DirectDecompressor.java

@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.compress;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Specification of a direct ByteBuffer 'de-compressor'. 
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface DirectDecompressor {
+  /*
+   * This exposes a direct interface for record decompression with direct byte
+   * buffers.
+   * 
+   * The decompress() function need not always consume the buffers provided;
+   * it may need to be called multiple times to decompress an entire buffer,
+   * and the object will hold the compression context internally.
+   * 
+   * Codecs such as {@link SnappyCodec} may or may not support partial
+   * decompression of buffers and will need enough space in the destination
+   * buffer to decompress an entire block.
+   * 
+   * The operation is modelled around dst.put(src);
+   * 
+   * The end result will move src.position() by the bytes-read and
+   * dst.position() by the bytes-written. It should not modify the src.limit()
+   * or dst.limit() to maintain consistency of operation between codecs.
+   * 
+   * @param src Source direct {@link ByteBuffer} for reading from. Requires src
+   * != null and src.remaining() > 0
+   * 
+   * @param dst Destination direct {@link ByteBuffer} for storing the results
+   * into. Requires dst != null and dst.remaining() to be > 0
+   * 
+   * @throws IOException if decompression fails
+   */
+  public void decompress(ByteBuffer src, ByteBuffer dst) throws IOException;
+}
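
A sketch of a call site for this interface, going through DefaultCodec (made a DirectDecompressionCodec earlier in this patch). It assumes the input is a single well-formed zlib stream that fits in one call, the 64 KB output buffer size is arbitrary, and createDirectDecompressor() returns null when the native library is not loaded.

    import java.io.IOException;
    import java.nio.ByteBuffer;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.DefaultCodec;
    import org.apache.hadoop.io.compress.DirectDecompressor;

    public class DirectDecompressSketch {
      static ByteBuffer decompressBlock(byte[] compressed, Configuration conf)
          throws IOException {
        DefaultCodec codec = new DefaultCodec();
        codec.setConf(conf);

        DirectDecompressor decompressor = codec.createDirectDecompressor();
        if (decompressor == null) {
          throw new IOException("native codec not available");
        }

        // Both buffers must be direct, per the decompress() contract.
        ByteBuffer src = ByteBuffer.allocateDirect(compressed.length);
        src.put(compressed);
        src.flip();
        ByteBuffer dst = ByteBuffer.allocateDirect(64 * 1024);

        // Advances src.position() by the bytes read and dst.position() by the
        // bytes written; per the contract above it may need to be called again
        // if src still has remaining input.
        decompressor.decompress(src, dst);
        dst.flip();
        return dst;   // holds the decompressed bytes
      }
    }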

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java

@@ -25,6 +25,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.DefaultCodec;
 import org.apache.hadoop.io.compress.zlib.*;
+import org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor;
+
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 /**
@@ -163,6 +165,13 @@ public class GzipCodec extends DefaultCodec {
       ? GzipZlibDecompressor.class
       : BuiltInGzipDecompressor.class;
   }
+    
+  @Override
+  public DirectDecompressor createDirectDecompressor() {
+    return ZlibFactory.isNativeZlibLoaded(conf) 
+        ? new ZlibDecompressor.ZlibDirectDecompressor(
+          ZlibDecompressor.CompressionHeader.AUTODETECT_GZIP_ZLIB, 0) : null;
+  }
 
   @Override
   public String getDefaultExtension() {

+ 5 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java

@@ -107,7 +107,7 @@ public class Lz4Codec implements Configurable, CompressionCodec {
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
 
-    int compressionOverhead = Math.max((int)(bufferSize * 0.01), 10);
+    int compressionOverhead = bufferSize/255 + 16;
 
     return new BlockCompressorStream(out, compressor, bufferSize,
         compressionOverhead);
@@ -140,7 +140,10 @@ public class Lz4Codec implements Configurable, CompressionCodec {
     int bufferSize = conf.getInt(
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
-    return new Lz4Compressor(bufferSize);
+    boolean useLz4HC = conf.getBoolean(
+        CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
+        CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT);
+    return new Lz4Compressor(bufferSize, useLz4HC);
   }
 
   /**

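The high-compression variant is opted into purely through configuration; a minimal sketch, assuming the native lz4 library is loaded (createCompressor() fails otherwise) and using the key constant referenced in the hunk above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.io.compress.Compressor;
    import org.apache.hadoop.io.compress.Lz4Codec;

    public class Lz4HcSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Opt in to lz4hc, trading CPU for a better compression ratio.
        conf.setBoolean(
            CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY, true);

        Lz4Codec codec = new Lz4Codec();
        codec.setConf(conf);
        // createCompressor() now reads the flag and constructs the compressor
        // with useLz4HC == true, switching it to compressBytesDirectHC().
        Compressor compressor = codec.createCompressor();
        System.out.println(compressor.getClass().getSimpleName());
      }
    }
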
+ 10 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java

@@ -26,13 +26,14 @@ import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
 import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;
+import org.apache.hadoop.io.compress.snappy.SnappyDecompressor.SnappyDirectDecompressor;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 /**
  * This class creates snappy compressors/decompressors.
  */
-public class SnappyCodec implements Configurable, CompressionCodec {
+public class SnappyCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
   Configuration conf;
 
   /**
@@ -203,6 +204,14 @@ public class SnappyCodec implements Configurable, CompressionCodec {
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
     return new SnappyDecompressor(bufferSize);
   }
+  
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public DirectDecompressor createDirectDecompressor() {
+    return isNativeCodeLoaded() ? new SnappyDirectDecompressor() : null;
+  }
 
   /**
    * Get the default filename extension for this kind of compression.

+ 17 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java

@@ -52,6 +52,7 @@ public class Lz4Compressor implements Compressor {
   private long bytesRead = 0L;
   private long bytesWritten = 0L;
 
+  private final boolean useLz4HC;
 
   static {
     if (NativeCodeLoader.isNativeCodeLoaded()) {
@@ -72,8 +73,11 @@ public class Lz4Compressor implements Compressor {
    * Creates a new compressor.
    *
    * @param directBufferSize size of the direct buffer to be used.
+   * @param useLz4HC use high compression ratio version of lz4, 
+   *                 which trades CPU for compression ratio.
    */
-  public Lz4Compressor(int directBufferSize) {
+  public Lz4Compressor(int directBufferSize, boolean useLz4HC) {
+    this.useLz4HC = useLz4HC;
     this.directBufferSize = directBufferSize;
 
     uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
@@ -81,6 +85,15 @@ public class Lz4Compressor implements Compressor {
     compressedDirectBuf.position(directBufferSize);
   }
 
+  /**
+   * Creates a new compressor.
+   *
+   * @param directBufferSize size of the direct buffer to be used.
+   */
+  public Lz4Compressor(int directBufferSize) {
+    this(directBufferSize, false);
+  }
+
   /**
    * Creates a new compressor with the default buffer size.
    */
@@ -227,7 +240,7 @@ public class Lz4Compressor implements Compressor {
     }
 
     // Compress data
-    n = compressBytesDirect();
+    n = useLz4HC ? compressBytesDirectHC() : compressBytesDirect();
     compressedDirectBuf.limit(n);
     uncompressedDirectBuf.clear(); // lz4 consumes all buffer input
 
@@ -297,5 +310,7 @@ public class Lz4Compressor implements Compressor {
 
   private native int compressBytesDirect();
 
+  private native int compressBytesDirectHC();
+
   public native static String getLibraryName();
 }

+ 72 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java

@@ -25,6 +25,7 @@ import java.nio.ByteBuffer;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.io.compress.DirectDecompressor;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 /**
@@ -282,4 +283,75 @@ public class SnappyDecompressor implements Decompressor {
   private native static void initIDs();
 
   private native int decompressBytesDirect();
+  
+  int decompressDirect(ByteBuffer src, ByteBuffer dst) throws IOException {
+    assert (this instanceof SnappyDirectDecompressor);
+    
+    ByteBuffer presliced = dst;
+    if (dst.position() > 0) {
+      presliced = dst;
+      dst = dst.slice();
+    }
+
+    Buffer originalCompressed = compressedDirectBuf;
+    Buffer originalUncompressed = uncompressedDirectBuf;
+    int originalBufferSize = directBufferSize;
+    compressedDirectBuf = src.slice();
+    compressedDirectBufLen = src.remaining();
+    uncompressedDirectBuf = dst;
+    directBufferSize = dst.remaining();
+    int n = 0;
+    try {
+      n = decompressBytesDirect();
+      presliced.position(presliced.position() + n);
+      // SNAPPY always consumes the whole buffer or throws an exception
+      src.position(src.limit());
+      finished = true;
+    } finally {
+      compressedDirectBuf = originalCompressed;
+      uncompressedDirectBuf = originalUncompressed;
+      compressedDirectBufLen = 0;
+      directBufferSize = originalBufferSize;
+    }
+    return n;
+  }
+  
+  public static class SnappyDirectDecompressor extends SnappyDecompressor implements
+      DirectDecompressor {
+    
+    @Override
+    public boolean finished() {
+      return (endOfInput && super.finished());
+    }
+
+    @Override
+    public void reset() {
+      super.reset();
+      endOfInput = true;
+    }
+
+    private boolean endOfInput;
+
+    @Override
+    public synchronized void decompress(ByteBuffer src, ByteBuffer dst)
+        throws IOException {
+      assert dst.isDirect() : "dst.isDirect()";
+      assert src.isDirect() : "src.isDirect()";
+      assert dst.remaining() > 0 : "dst.remaining() > 0";
+      this.decompressDirect(src, dst);
+      endOfInput = !src.hasRemaining();
+    }
+
+    @Override
+    public synchronized void setDictionary(byte[] b, int off, int len) {
+      throw new UnsupportedOperationException(
+          "byte[] arrays are not supported for DirectDecompressor");
+    }
+
+    @Override
+    public synchronized int decompress(byte[] b, int off, int len) {
+      throw new UnsupportedOperationException(
+          "byte[] arrays are not supported for DirectDecompressor");
+    }
+  }
 }

+ 84 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java

@@ -23,6 +23,7 @@ import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.io.compress.DirectDecompressor;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 /**
@@ -106,7 +107,7 @@ public class ZlibDecompressor implements Decompressor {
    */
   public ZlibDecompressor(CompressionHeader header, int directBufferSize) {
     this.header = header;
-    this.directBufferSize = directBufferSize;
+    this.directBufferSize = directBufferSize;    
     compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
     uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
     uncompressedDirectBuf.position(directBufferSize);
@@ -310,4 +311,86 @@ public class ZlibDecompressor implements Decompressor {
   private native static int getRemaining(long strm);
   private native static void reset(long strm);
   private native static void end(long strm);
+    
+  int inflateDirect(ByteBuffer src, ByteBuffer dst) throws IOException {
+    assert (this instanceof ZlibDirectDecompressor);
+    
+    ByteBuffer presliced = dst;
+    if (dst.position() > 0) {
+      presliced = dst;
+      dst = dst.slice();
+    }
+
+    Buffer originalCompressed = compressedDirectBuf;
+    Buffer originalUncompressed = uncompressedDirectBuf;
+    int originalBufferSize = directBufferSize;
+    compressedDirectBuf = src;
+    compressedDirectBufOff = src.position();
+    compressedDirectBufLen = src.remaining();
+    uncompressedDirectBuf = dst;
+    directBufferSize = dst.remaining();
+    int n = 0;
+    try {
+      n = inflateBytesDirect();
+      presliced.position(presliced.position() + n);
+      if (compressedDirectBufLen > 0) {
+        src.position(compressedDirectBufOff);
+      } else {
+        src.position(src.limit());
+      }
+    } finally {
+      compressedDirectBuf = originalCompressed;
+      uncompressedDirectBuf = originalUncompressed;
+      compressedDirectBufOff = 0;
+      compressedDirectBufLen = 0;
+      directBufferSize = originalBufferSize;
+    }
+    return n;
+  }
+  
+  public static class ZlibDirectDecompressor 
+      extends ZlibDecompressor implements DirectDecompressor {
+    public ZlibDirectDecompressor() {
+      super(CompressionHeader.DEFAULT_HEADER, 0);
+    }
+
+    public ZlibDirectDecompressor(CompressionHeader header, int directBufferSize) {
+      super(header, directBufferSize);
+    }
+    
+    @Override
+    public boolean finished() {
+      return (endOfInput && super.finished());
+    }
+    
+    @Override
+    public void reset() {
+      super.reset();
+      endOfInput = true;
+    }
+    
+    private boolean endOfInput;
+
+    @Override
+    public synchronized void decompress(ByteBuffer src, ByteBuffer dst)
+        throws IOException {
+      assert dst.isDirect() : "dst.isDirect()";
+      assert src.isDirect() : "src.isDirect()";
+      assert dst.remaining() > 0 : "dst.remaining() > 0";      
+      this.inflateDirect(src, dst);
+      endOfInput = !src.hasRemaining();
+    }
+
+    @Override
+    public synchronized void setDictionary(byte[] b, int off, int len) {
+      throw new UnsupportedOperationException(
+          "byte[] arrays are not supported for DirectDecompressor");
+    }
+
+    @Override
+    public synchronized int decompress(byte[] b, int off, int len) {
+      throw new UnsupportedOperationException(
+          "byte[] arrays are not supported for DirectDecompressor");
+    }
+  }
 }

+ 12 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java

@@ -23,6 +23,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.io.compress.DirectDecompressor;
 import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel;
 import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
 import org.apache.hadoop.util.NativeCodeLoader;
@@ -116,6 +117,17 @@ public class ZlibFactory {
     return (isNativeZlibLoaded(conf)) ? 
       new ZlibDecompressor() : new BuiltInZlibInflater(); 
   }
+  
+  /**
+   * Return the appropriate implementation of the zlib direct decompressor. 
+   * 
+   * @param conf configuration
+   * @return the appropriate implementation of the zlib decompressor.
+   */
+  public static DirectDecompressor getZlibDirectDecompressor(Configuration conf) {
+    return (isNativeZlibLoaded(conf)) ? 
+      new ZlibDecompressor.ZlibDirectDecompressor() : null; 
+  }
 
   public static void setCompressionStrategy(Configuration conf,
       CompressionStrategy strategy) {

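The factory hook added above only returns a ZlibDirectDecompressor when the native zlib library is loaded, so callers must handle the null case themselves. A minimal usage sketch, not part of this patch (the buffer sizes and the data-filling step are placeholders):

    import java.nio.ByteBuffer;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.DirectDecompressor;
    import org.apache.hadoop.io.compress.zlib.ZlibFactory;

    public class DirectDecompressExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DirectDecompressor dd = ZlibFactory.getZlibDirectDecompressor(conf);
        if (dd == null) {
          System.err.println("Native zlib not loaded; fall back to stream codecs");
          return;
        }
        // Both buffers must be direct; 'src' is expected to hold a zlib stream.
        ByteBuffer src = ByteBuffer.allocateDirect(64 * 1024);
        ByteBuffer dst = ByteBuffer.allocateDirect(256 * 1024);
        // ... fill 'src' with compressed bytes and flip() it ...
        dd.decompress(src, dst);   // advances both buffer positions
        dst.flip();                // 'dst' now exposes the inflated bytes
      }
    }
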
+ 165 - 11
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java

@@ -23,6 +23,9 @@ import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.RandomAccessFile;
+import java.lang.reflect.Field;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
@@ -33,10 +36,11 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import sun.misc.Unsafe;
+
 import com.google.common.annotations.VisibleForTesting;
 
 /**
@@ -94,9 +98,6 @@ public class NativeIO {
 
     private static final Log LOG = LogFactory.getLog(NativeIO.class);
 
-    @VisibleForTesting
-    public static CacheTracker cacheTracker = null;
-    
     private static boolean nativeLoaded = false;
     private static boolean fadvisePossible = true;
     private static boolean syncFileRangePossible = true;
@@ -107,10 +108,71 @@ public class NativeIO {
 
     private static long cacheTimeout = -1;
 
-    public static interface CacheTracker {
-      public void fadvise(String identifier, long offset, long len, int flags);
+    private static CacheManipulator cacheManipulator = new CacheManipulator();
+
+    public static CacheManipulator getCacheManipulator() {
+      return cacheManipulator;
     }
-    
+
+    public static void setCacheManipulator(CacheManipulator cacheManipulator) {
+      POSIX.cacheManipulator = cacheManipulator;
+    }
+
+    /**
+     * Used to manipulate the operating system cache.
+     */
+    @VisibleForTesting
+    public static class CacheManipulator {
+      public void mlock(String identifier, ByteBuffer buffer,
+          long len) throws IOException {
+        POSIX.mlock(buffer, len);
+      }
+
+      public long getMemlockLimit() {
+        return NativeIO.getMemlockLimit();
+      }
+
+      public long getOperatingSystemPageSize() {
+        return NativeIO.getOperatingSystemPageSize();
+      }
+
+      public void posixFadviseIfPossible(String identifier,
+        FileDescriptor fd, long offset, long len, int flags)
+            throws NativeIOException {
+        NativeIO.POSIX.posixFadviseIfPossible(identifier, fd, offset,
+            len, flags);
+      }
+
+      public boolean verifyCanMlock() {
+        return NativeIO.isAvailable();
+      }
+    }
+
+    /**
+     * A CacheManipulator used for testing which does not actually call mlock.
+     * This allows many tests to be run even when the operating system does not
+     * allow mlock, or only allows limited mlocking.
+     */
+    @VisibleForTesting
+    public static class NoMlockCacheManipulator extends CacheManipulator {
+      public void mlock(String identifier, ByteBuffer buffer,
+          long len) throws IOException {
+        LOG.info("mlocking " + identifier);
+      }
+
+      public long getMemlockLimit() {
+        return 1125899906842624L;
+      }
+
+      public long getOperatingSystemPageSize() {
+        return 4096;
+      }
+
+      public boolean verifyCanMlock() {
+        return true;
+      }
+    }
+
     static {
       if (NativeCodeLoader.isNativeCodeLoaded()) {
         try {
@@ -145,6 +207,12 @@ public class NativeIO {
       return NativeCodeLoader.isNativeCodeLoaded() && nativeLoaded;
     }
 
+    private static void assertCodeLoaded() throws IOException {
+      if (!isAvailable()) {
+        throw new IOException("NativeIO was not loaded");
+      }
+    }
+
     /** Wrapper around open(2) */
     public static native FileDescriptor open(String path, int flags, int mode) throws IOException;
     /** Wrapper around fstat(2) */
@@ -187,12 +255,9 @@ public class NativeIO {
      *
      * @throws NativeIOException if there is an error with the syscall
      */
-    public static void posixFadviseIfPossible(String identifier,
+    static void posixFadviseIfPossible(String identifier,
         FileDescriptor fd, long offset, long len, int flags)
         throws NativeIOException {
-      if (cacheTracker != null) {
-        cacheTracker.fadvise(identifier, offset, len, flags);
-      }
       if (nativeLoaded && fadvisePossible) {
         try {
           posix_fadvise(fd, offset, len, flags);
@@ -225,6 +290,66 @@ public class NativeIO {
       }
     }
 
+    static native void mlock_native(
+        ByteBuffer buffer, long len) throws NativeIOException;
+    static native void munlock_native(
+        ByteBuffer buffer, long len) throws NativeIOException;
+
+    /**
+     * Locks the provided direct ByteBuffer into memory, preventing it from
+     * swapping out. After a buffer is locked, future accesses will not incur
+     * a page fault.
+     * 
+     * See the mlock(2) man page for more information.
+     * 
+     * @throws NativeIOException
+     */
+    static void mlock(ByteBuffer buffer, long len)
+        throws IOException {
+      assertCodeLoaded();
+      if (!buffer.isDirect()) {
+        throw new IOException("Cannot mlock a non-direct ByteBuffer");
+      }
+      mlock_native(buffer, len);
+    }
+
+    /**
+     * Unlocks a locked direct ByteBuffer, allowing it to swap out of memory.
+     * This is a no-op if the ByteBuffer was not previously locked.
+     * 
+     * See the munlock(2) man page for more information.
+     * 
+     * @throws NativeIOException
+     */
+    public static void munlock(ByteBuffer buffer, long len)
+        throws IOException {
+      assertCodeLoaded();
+      if (!buffer.isDirect()) {
+        throw new IOException("Cannot munlock a non-direct ByteBuffer");
+      }
+      munlock_native(buffer, len);
+    }
+    
+    /**
+     * Unmaps the block from memory. See munmap(2).
+     *
+     * There isn't any portable way to unmap a memory region in Java.
+     * So we use the sun.nio method here.
+     * Note that unmapping a memory region could cause crashes if code
+     * continues to reference the unmapped memory.  However, if we don't
+     * manually unmap the memory, we are dependent on the finalizer to
+     * do it, and we have no idea when the finalizer will run.
+     *
+     * @param buffer    The buffer to unmap.
+     */
+    public static void munmap(MappedByteBuffer buffer) {
+      if (buffer instanceof sun.nio.ch.DirectBuffer) {
+        sun.misc.Cleaner cleaner =
+            ((sun.nio.ch.DirectBuffer)buffer).cleaner();
+        cleaner.clean();
+      }
+    }
+
     /** Linux only methods used for getOwner() implementation */
     private static native long getUIDforFDOwnerforOwner(FileDescriptor fd) throws IOException;
     private static native String getUserName(long uid) throws IOException;
@@ -478,6 +603,35 @@ public class NativeIO {
   /** Initialize the JNI method ID and class ID cache */
   private static native void initNative();
 
+  /**
+   * Get the maximum number of bytes that can be locked into memory at any
+   * given point.
+   *
+   * @return 0 if no bytes can be locked into memory;
+   *         Long.MAX_VALUE if there is no limit;
+   *         The number of bytes that can be locked into memory otherwise.
+   */
+  static long getMemlockLimit() {
+    return isAvailable() ? getMemlockLimit0() : 0;
+  }
+
+  private static native long getMemlockLimit0();
+  
+  /**
+   * @return the operating system's page size.
+   */
+  static long getOperatingSystemPageSize() {
+    try {
+      Field f = Unsafe.class.getDeclaredField("theUnsafe");
+      f.setAccessible(true);
+      Unsafe unsafe = (Unsafe)f.get(null);
+      return unsafe.pageSize();
+    } catch (Throwable e) {
+      LOG.warn("Unable to get operating system page size.  Guessing 4096.", e);
+      return 4096;
+    }
+  }
+
   private static class CachedUid {
     final long timestamp;
     final String username;

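The CacheManipulator indirection above exists mostly so tests can avoid real mlock(2) calls on hosts where RLIMIT_MEMLOCK is small or the native library is missing. A hedged sketch of swapping in the no-op implementation for a test (the helper class name is illustrative, not part of the patch):

    import org.apache.hadoop.io.nativeio.NativeIO;

    public class MlockTestSetup {
      public static void useFakeMlock() {
        // Replace the default manipulator so code paths that would mlock
        // cache blocks only log instead of pinning pages.
        NativeIO.POSIX.setCacheManipulator(
            new NativeIO.POSIX.NoMlockCacheManipulator());
      }
    }
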
+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/LossyRetryInvocationHandler.java

@@ -18,9 +18,9 @@
 package org.apache.hadoop.io.retry;
 
 import java.lang.reflect.Method;
-import java.net.UnknownHostException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ipc.RetriableException;
 
 /**
  * A dummy invocation handler extending RetryInvocationHandler. It drops the
@@ -52,7 +52,7 @@ public class LossyRetryInvocationHandler<T> extends RetryInvocationHandler<T> {
     if (retryCount < this.numToDrop) {
       RetryCount.set(++retryCount);
       LOG.info("Drop the response. Current retryCount == " + retryCount);
-      throw new UnknownHostException("Fake Exception");
+      throw new RetriableException("Fake Exception");
     } else {
       LOG.info("retryCount == " + retryCount
           + ". It's time to normally process the response");

+ 10 - 12
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -68,7 +68,7 @@ public class RetryPolicies {
    * </p>
    */
   public static final RetryPolicy RETRY_FOREVER = new RetryForever();
-  
+
   /**
    * <p>
    * Keep trying a limited number of times, waiting a fixed time between attempts,
@@ -558,27 +558,25 @@ public class RetryPolicies {
           isWrappedStandbyException(e)) {
         return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
             getFailoverOrRetrySleepTime(failovers));
-      } else if (e instanceof SocketException ||
-                 (e instanceof IOException && !(e instanceof RemoteException))) {
+      } else if (e instanceof RetriableException
+          || getWrappedRetriableException(e) != null) {
+        // a RetriableException, possibly wrapped in a RemoteException
+        return new RetryAction(RetryAction.RetryDecision.RETRY,
+              getFailoverOrRetrySleepTime(retries));
+      } else if (e instanceof SocketException
+          || (e instanceof IOException && !(e instanceof RemoteException))) {
         if (isIdempotentOrAtMostOnce) {
           return RetryAction.FAILOVER_AND_RETRY;
         } else {
           return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
-              "the invoked method is not idempotent, and unable to determine " +
-              "whether it was invoked");
+              "the invoked method is not idempotent, and unable to determine "
+                  + "whether it was invoked");
         }
       } else {
-        RetriableException re = getWrappedRetriableException(e);
-        if (re != null) {
-          return new RetryAction(RetryAction.RetryDecision.RETRY,
-              getFailoverOrRetrySleepTime(retries));
-        } else {
           return fallbackPolicy.shouldRetry(e, retries, failovers,
               isIdempotentOrAtMostOnce);
-        }
       }
     }
-    
   }
 
   /**

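The reordered branch above treats a RetriableException, whether thrown directly or wrapped by the server, as an immediate retry rather than a failover. As a rough illustration of the unwrapping check the policy relies on (the helper below is hypothetical and only mirrors the policy's private getWrappedRetriableException):

    import org.apache.hadoop.ipc.RemoteException;
    import org.apache.hadoop.ipc.RetriableException;

    final class RetriableCheck {
      // An exception counts as "retriable" if it is a RetriableException
      // itself, or a RemoteException whose declared class is RetriableException.
      static boolean isRetriable(Exception e) {
        if (e instanceof RetriableException) {
          return true;
        }
        if (e instanceof RemoteException) {
          RemoteException re = (RemoteException) e;
          return RetriableException.class.getName().equals(re.getClassName());
        }
        return false;
      }
    }
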
+ 75 - 13
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -138,17 +138,70 @@ public class Client {
   final static int CONNECTION_CONTEXT_CALL_ID = -3;
   
   /**
-   * Executor on which IPC calls' parameters are sent. Deferring
-   * the sending of parameters to a separate thread isolates them
-   * from thread interruptions in the calling code.
+   * Executor on which IPC calls' parameters are sent.
+   * Deferring the sending of parameters to a separate
+   * thread isolates them from thread interruptions in the
+   * calling code.
    */
-  private static final ExecutorService SEND_PARAMS_EXECUTOR = 
-    Executors.newCachedThreadPool(
-        new ThreadFactoryBuilder()
-        .setDaemon(true)
-        .setNameFormat("IPC Parameter Sending Thread #%d")
-        .build());
+  private final ExecutorService sendParamsExecutor;
+  private final static ClientExecutorServiceFactory clientExcecutorFactory =
+      new ClientExecutorServiceFactory();
 
+  private static class ClientExecutorServiceFactory {
+    private int executorRefCount = 0;
+    private ExecutorService clientExecutor = null;
+    
+    /**
+     * Get Executor on which IPC calls' parameters are sent.
+     * If the internal reference counter is zero, this method
+     * creates the instance of Executor. If not, this method
+     * just returns the reference of clientExecutor.
+     * 
+     * @return An ExecutorService instance
+     */
+    synchronized ExecutorService refAndGetInstance() {
+      if (executorRefCount == 0) {
+        clientExecutor = Executors.newCachedThreadPool(
+            new ThreadFactoryBuilder()
+            .setDaemon(true)
+            .setNameFormat("IPC Parameter Sending Thread #%d")
+            .build());
+      }
+      executorRefCount++;
+      
+      return clientExecutor;
+    }
+    
+    /**
+     * Release the executor on which IPC calls' parameters are sent.
+     * This method decrements the internal reference counter; once it
+     * reaches zero the executor is shut down and discarded.
+     * 
+     * @return the ExecutorService instance if it still exists,
+     *   or null if it has been shut down.
+     */
+    synchronized ExecutorService unrefAndCleanup() {
+      executorRefCount--;
+      assert(executorRefCount >= 0);
+      
+      if (executorRefCount == 0) {
+        clientExecutor.shutdown();
+        try {
+          if (!clientExecutor.awaitTermination(1, TimeUnit.MINUTES)) {
+            clientExecutor.shutdownNow();
+          }
+        } catch (InterruptedException e) {
+          LOG.error("Interrupted while waiting for clientExecutor" +
+              "to stop", e);
+          clientExecutor.shutdownNow();
+        }
+        clientExecutor = null;
+      }
+      
+      return clientExecutor;
+    }
+  };
   
   /**
    * set the ping interval value in configuration
@@ -233,7 +286,7 @@ public class Client {
       if (!Arrays.equals(id, RpcConstants.DUMMY_CLIENT_ID)) {
         if (!Arrays.equals(id, clientId)) {
           throw new IOException("Client IDs not matched: local ID="
-              + StringUtils.byteToHexString(clientId) + ", ID in reponse="
+              + StringUtils.byteToHexString(clientId) + ", ID in response="
               + StringUtils.byteToHexString(header.getClientId().toByteArray()));
         }
       }
@@ -918,7 +971,8 @@ public class Client {
       }
 
       // Serialize the call to be sent. This is done from the actual
-      // caller thread, rather than the SEND_PARAMS_EXECUTOR thread,
+      // caller thread, rather than the sendParamsExecutor thread,
       // so that if the serialization throws an error, it is reported
       // properly. This also parallelizes the serialization.
       //
@@ -936,7 +990,7 @@ public class Client {
       call.rpcRequest.write(d);
 
       synchronized (sendRpcRequestLock) {
-        Future<?> senderFuture = SEND_PARAMS_EXECUTOR.submit(new Runnable() {
+        Future<?> senderFuture = sendParamsExecutor.submit(new Runnable() {
           @Override
           public void run() {
             try {
@@ -1132,6 +1186,7 @@ public class Client {
     this.fallbackAllowed = conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
         CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
     this.clientId = ClientId.getClientId();
+    this.sendParamsExecutor = clientExcecutorFactory.refAndGetInstance();
   }
 
   /**
@@ -1176,6 +1231,8 @@ public class Client {
       } catch (InterruptedException e) {
       }
     }
+    
+    clientExcecutorFactory.unrefAndCleanup();
   }
 
   /**
@@ -1506,8 +1563,13 @@ public class Client {
         final int max = conf.getInt(
             CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
             CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT);
+        final int retryInterval = conf.getInt(
+            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY,
+            CommonConfigurationKeysPublic
+                .IPC_CLIENT_CONNECT_RETRY_INTERVAL_DEFAULT);
+
         connectionRetryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
-            max, 1, TimeUnit.SECONDS);
+            max, retryInterval, TimeUnit.MILLISECONDS);
       }
 
       boolean doPing =

+ 213 - 167
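The ClientExecutorServiceFactory above replaces the static SEND_PARAMS_EXECUTOR with a reference-counted singleton, so the parameter-sending thread pool is created by the first Client and torn down when the last Client stops. The same pattern in isolation, as a generic sketch rather than the Hadoop code itself:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    final class RefCountedExecutor {
      private static int refs = 0;
      private static ExecutorService pool = null;

      // Each user acquires the shared pool, creating it on the first reference.
      static synchronized ExecutorService acquire() {
        if (refs == 0) {
          pool = Executors.newCachedThreadPool();
        }
        refs++;
        return pool;
      }

      // Each user releases on shutdown; the last release stops the pool.
      static synchronized void release() throws InterruptedException {
        if (--refs == 0) {
          pool.shutdown();
          if (!pool.awaitTermination(1, TimeUnit.MINUTES)) {
            pool.shutdownNow();
          }
          pool = null;
        }
      }
    }
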
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -51,11 +51,13 @@ import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
@@ -342,17 +344,8 @@ public abstract class Server {
   private int port;                               // port we listen on
   private int handlerCount;                       // number of handler threads
   private int readThreads;                        // number of read threads
+  private int readerPendingConnectionQueue;         // number of connections to queue per read thread
   private Class<? extends Writable> rpcRequestClass;   // class used for deserializing the rpc request
-  private int maxIdleTime;                        // the maximum idle time after 
-                                                  // which a client may be disconnected
-  private int thresholdIdleConnections;           // the number of idle connections
-                                                  // after which we will start
-                                                  // cleaning up idle 
-                                                  // connections
-  int maxConnectionsToNuke;                       // the max number of 
-                                                  // connections to nuke
-                                                  //during a cleanup
-  
   protected RpcMetrics rpcMetrics;
   protected RpcDetailedMetrics rpcDetailedMetrics;
   
@@ -370,13 +363,10 @@ public abstract class Server {
   volatile private boolean running = true;         // true while server runs
   private BlockingQueue<Call> callQueue; // queued calls
 
-  private List<Connection> connectionList = 
-    Collections.synchronizedList(new LinkedList<Connection>());
-  //maintain a list
-  //of client connections
+  // maintains the set of client connections and handles idle timeouts
+  private ConnectionManager connectionManager;
   private Listener listener = null;
   private Responder responder = null;
-  private int numConnections = 0;
   private Handler[] handlers = null;
 
   /**
@@ -446,8 +436,8 @@ public abstract class Server {
   }
 
   @VisibleForTesting
-  List<Connection> getConnections() {
-    return connectionList;
+  Connection[] getConnections() {
+    return connectionManager.toArray();
   }
 
   /**
@@ -515,11 +505,6 @@ public abstract class Server {
     private Reader[] readers = null;
     private int currentReader = 0;
     private InetSocketAddress address; //the address we bind at
-    private Random rand = new Random();
-    private long lastCleanupRunTime = 0; //the last time when a cleanup connec-
-                                         //-tion (for idle connections) ran
-    private long cleanupInterval = 10000; //the minimum interval between 
-                                          //two cleanup runs
     private int backlogLength = conf.getInt(
         CommonConfigurationKeysPublic.IPC_SERVER_LISTEN_QUEUE_SIZE_KEY,
         CommonConfigurationKeysPublic.IPC_SERVER_LISTEN_QUEUE_SIZE_DEFAULT);
@@ -550,25 +535,27 @@ public abstract class Server {
     }
     
     private class Reader extends Thread {
-      private volatile boolean adding = false;
+      final private BlockingQueue<Connection> pendingConnections;
       private final Selector readSelector;
 
       Reader(String name) throws IOException {
         super(name);
 
+        this.pendingConnections =
+            new LinkedBlockingQueue<Connection>(readerPendingConnectionQueue);
         this.readSelector = Selector.open();
       }
       
       @Override
       public void run() {
-        LOG.info("Starting " + getName());
+        LOG.info("Starting " + Thread.currentThread().getName());
         try {
           doRunLoop();
         } finally {
           try {
             readSelector.close();
           } catch (IOException ioe) {
-            LOG.error("Error closing read selector in " + this.getName(), ioe);
+            LOG.error("Error closing read selector in " + Thread.currentThread().getName(), ioe);
           }
         }
       }
@@ -577,10 +564,14 @@ public abstract class Server {
         while (running) {
           SelectionKey key = null;
           try {
+            // consume as many connections as currently queued to avoid
+            // unbridled acceptance of connections that starves the select
+            int size = pendingConnections.size();
+            for (int i=size; i>0; i--) {
+              Connection conn = pendingConnections.take();
+              conn.channel.register(readSelector, SelectionKey.OP_READ, conn);
+            }
             readSelector.select();
-            while (adding) {
-              this.wait(1000);
-            }              
 
             Iterator<SelectionKey> iter = readSelector.selectedKeys().iterator();
             while (iter.hasNext()) {
@@ -595,7 +586,7 @@ public abstract class Server {
             }
           } catch (InterruptedException e) {
             if (running) {                      // unexpected -- log it
-              LOG.info(getName() + " unexpectedly interrupted", e);
+              LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
             }
           } catch (IOException ex) {
             LOG.error("Error in Reader", ex);
@@ -604,26 +595,14 @@ public abstract class Server {
       }
 
       /**
-       * This gets reader into the state that waits for the new channel
-       * to be registered with readSelector. If it was waiting in select()
-       * the thread will be woken up, otherwise whenever select() is called
-       * it will return even if there is nothing to read and wait
-       * in while(adding) for finishAdd call
+       * Updating the readSelector while it's being used is not thread-safe,
+       * so the connection must be queued.  The reader will drain the queue
+       * and update its readSelector before performing the next select
        */
-      public void startAdd() {
-        adding = true;
+      public void addConnection(Connection conn) throws InterruptedException {
+        pendingConnections.put(conn);
         readSelector.wakeup();
       }
-      
-      public synchronized SelectionKey registerChannel(SocketChannel channel)
-                                                          throws IOException {
-          return channel.register(readSelector, SelectionKey.OP_READ);
-      }
-
-      public synchronized void finishAdd() {
-        adding = false;
-        this.notify();        
-      }
 
       void shutdown() {
         assert !running;
@@ -635,58 +614,12 @@ public abstract class Server {
         }
       }
     }
-    /** cleanup connections from connectionList. Choose a random range
-     * to scan and also have a limit on the number of the connections
-     * that will be cleanedup per run. The criteria for cleanup is the time
-     * for which the connection was idle. If 'force' is true then all 
-     * connections will be looked at for the cleanup.
-     */
-    private void cleanupConnections(boolean force) {
-      if (force || numConnections > thresholdIdleConnections) {
-        long currentTime = Time.now();
-        if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) {
-          return;
-        }
-        int start = 0;
-        int end = numConnections - 1;
-        if (!force) {
-          start = rand.nextInt() % numConnections;
-          end = rand.nextInt() % numConnections;
-          int temp;
-          if (end < start) {
-            temp = start;
-            start = end;
-            end = temp;
-          }
-        }
-        int i = start;
-        int numNuked = 0;
-        while (i <= end) {
-          Connection c;
-          synchronized (connectionList) {
-            try {
-              c = connectionList.get(i);
-            } catch (Exception e) {return;}
-          }
-          if (c.timedOut(currentTime)) {
-            if (LOG.isDebugEnabled())
-              LOG.debug(getName() + ": disconnecting client " + c.getHostAddress());
-            closeConnection(c);
-            numNuked++;
-            end--;
-            c = null;
-            if (!force && numNuked == maxConnectionsToNuke) break;
-          }
-          else i++;
-        }
-        lastCleanupRunTime = Time.now();
-      }
-    }
 
     @Override
     public void run() {
-      LOG.info(getName() + ": starting");
+      LOG.info(Thread.currentThread().getName() + ": starting");
       SERVER.set(Server.this);
+      connectionManager.startIdleScan();
       while (running) {
         SelectionKey key = null;
         try {
@@ -710,14 +643,13 @@ public abstract class Server {
           // some thread(s) a chance to finish
           LOG.warn("Out of Memory in server select", e);
           closeCurrentConnection(key, e);
-          cleanupConnections(true);
+          connectionManager.closeIdle(true);
           try { Thread.sleep(60000); } catch (Exception ie) {}
         } catch (Exception e) {
           closeCurrentConnection(key, e);
         }
-        cleanupConnections(false);
       }
-      LOG.info("Stopping " + this.getName());
+      LOG.info("Stopping " + Thread.currentThread().getName());
 
       synchronized (this) {
         try {
@@ -728,10 +660,9 @@ public abstract class Server {
         selector= null;
         acceptChannel= null;
         
-        // clean up all connections
-        while (!connectionList.isEmpty()) {
-          closeConnection(connectionList.remove(0));
-        }
+        // close all connections
+        connectionManager.stopIdleScan();
+        connectionManager.closeAll();
       }
     }
 
@@ -739,8 +670,6 @@ public abstract class Server {
       if (key != null) {
         Connection c = (Connection)key.attachment();
         if (c != null) {
-          if (LOG.isDebugEnabled())
-            LOG.debug(getName() + ": disconnecting client " + c.getHostAddress());
           closeConnection(c);
           c = null;
         }
@@ -751,8 +680,7 @@ public abstract class Server {
       return (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
     }
     
-    void doAccept(SelectionKey key) throws IOException,  OutOfMemoryError {
-      Connection c = null;
+    void doAccept(SelectionKey key) throws InterruptedException, IOException,  OutOfMemoryError {
       ServerSocketChannel server = (ServerSocketChannel) key.channel();
       SocketChannel channel;
       while ((channel = server.accept()) != null) {
@@ -762,22 +690,9 @@ public abstract class Server {
         channel.socket().setKeepAlive(true);
         
         Reader reader = getReader();
-        try {
-          reader.startAdd();
-          SelectionKey readKey = reader.registerChannel(channel);
-          c = new Connection(readKey, channel, Time.now());
-          readKey.attach(c);
-          synchronized (connectionList) {
-            connectionList.add(numConnections, c);
-            numConnections++;
-          }
-          if (LOG.isDebugEnabled())
-            LOG.debug("Server connection from " + c.toString() +
-                "; # active connections: " + numConnections +
-                "; # queued calls: " + callQueue.size());          
-        } finally {
-          reader.finishAdd(); 
-        }
+        Connection c = connectionManager.register(channel);
+        key.attach(c);  // so closeCurrentConnection can get the object
+        reader.addConnection(c);
       }
     }
 
@@ -792,23 +707,19 @@ public abstract class Server {
       try {
         count = c.readAndProcess();
       } catch (InterruptedException ieo) {
-        LOG.info(getName() + ": readAndProcess caught InterruptedException", ieo);
+        LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo);
         throw ieo;
       } catch (Exception e) {
         // a WrappedRpcServerException is an exception that has been sent
         // to the client, so the stacktrace is unnecessary; any other
         // exceptions are unexpected internal server errors and thus the
         // stacktrace should be logged
-        LOG.info(getName() + ": readAndProcess from client " +
+        LOG.info(Thread.currentThread().getName() + ": readAndProcess from client " +
             c.getHostAddress() + " threw exception [" + e + "]",
             (e instanceof WrappedRpcServerException) ? null : e);
         count = -1; //so that the (count < 0) block is executed
       }
       if (count < 0) {
-        if (LOG.isDebugEnabled())
-          LOG.debug(getName() + ": disconnecting client " + 
-                    c + ". Number of active connections: "+
-                    numConnections);
         closeConnection(c);
         c = null;
       }
@@ -826,7 +737,7 @@ public abstract class Server {
         try {
           acceptChannel.socket().close();
         } catch (IOException e) {
-          LOG.info(getName() + ":Exception in closing listener socket. " + e);
+          LOG.info(Thread.currentThread().getName() + ":Exception in closing listener socket. " + e);
         }
       }
       for (Reader r : readers) {
@@ -859,16 +770,16 @@ public abstract class Server {
 
     @Override
     public void run() {
-      LOG.info(getName() + ": starting");
+      LOG.info(Thread.currentThread().getName() + ": starting");
       SERVER.set(Server.this);
       try {
         doRunLoop();
       } finally {
-        LOG.info("Stopping " + this.getName());
+        LOG.info("Stopping " + Thread.currentThread().getName());
         try {
           writeSelector.close();
         } catch (IOException ioe) {
-          LOG.error("Couldn't close write selector in " + this.getName(), ioe);
+          LOG.error("Couldn't close write selector in " + Thread.currentThread().getName(), ioe);
         }
       }
     }
@@ -889,7 +800,7 @@ public abstract class Server {
                   doAsyncWrite(key);
               }
             } catch (IOException e) {
-              LOG.info(getName() + ": doAsyncWrite threw exception " + e);
+              LOG.info(Thread.currentThread().getName() + ": doAsyncWrite threw exception " + e);
             }
           }
           long now = Time.now();
@@ -1004,7 +915,7 @@ public abstract class Server {
           call = responseQueue.removeFirst();
           SocketChannel channel = call.connection.channel;
           if (LOG.isDebugEnabled()) {
-            LOG.debug(getName() + ": responding to " + call);
+            LOG.debug(Thread.currentThread().getName() + ": responding to " + call);
           }
           //
           // Send as much data as we can in the non-blocking fashion
@@ -1023,7 +934,7 @@ public abstract class Server {
               done = false;            // more calls pending to be sent.
             }
             if (LOG.isDebugEnabled()) {
-              LOG.debug(getName() + ": responding to " + call
+              LOG.debug(Thread.currentThread().getName() + ": responding to " + call
                   + " Wrote " + numBytes + " bytes.");
             }
           } else {
@@ -1051,7 +962,7 @@ public abstract class Server {
               }
             }
             if (LOG.isDebugEnabled()) {
-              LOG.debug(getName() + ": responding to " + call
+              LOG.debug(Thread.currentThread().getName() + ": responding to " + call
                   + " Wrote partial " + numBytes + " bytes.");
             }
           }
@@ -1059,7 +970,7 @@ public abstract class Server {
         }
       } finally {
         if (error && call != null) {
-          LOG.warn(getName()+", call " + call + ": output error");
+          LOG.warn(Thread.currentThread().getName()+", call " + call + ": output error");
           done = true;               // error. no more data for this channel.
           closeConnection(call.connection);
         }
@@ -1187,8 +1098,7 @@ public abstract class Server {
     private boolean sentNegotiate = false;
     private boolean useWrap = false;
     
-    public Connection(SelectionKey key, SocketChannel channel, 
-                      long lastContact) {
+    public Connection(SocketChannel channel, long lastContact) {
       this.channel = channel;
       this.lastContact = lastContact;
       this.data = null;
@@ -1250,12 +1160,6 @@ public abstract class Server {
       rpcCount++;
     }
     
-    private boolean timedOut(long currentTime) {
-      if (isIdle() && currentTime -  lastContact > maxIdleTime)
-        return true;
-      return false;
-    }
-    
     private UserGroupInformation getAuthorizedUgi(String authorizedId)
         throws InvalidToken, AccessControlException {
       if (authMethod == AuthMethod.TOKEN) {
@@ -2021,7 +1925,7 @@ public abstract class Server {
 
     @Override
     public void run() {
-      LOG.debug(getName() + ": starting");
+      LOG.debug(Thread.currentThread().getName() + ": starting");
       SERVER.set(Server.this);
       ByteArrayOutputStream buf = 
         new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE);
@@ -2029,7 +1933,11 @@ public abstract class Server {
         try {
           final Call call = callQueue.take(); // pop the queue; maybe blocked here
           if (LOG.isDebugEnabled()) {
-            LOG.debug(getName() + ": " + call + " for RpcKind " + call.rpcKind);
+            LOG.debug(Thread.currentThread().getName() + ": " + call + " for RpcKind " + call.rpcKind);
+          }
+          if (!call.connection.channel.isOpen()) {
+            LOG.info(Thread.currentThread().getName() + ": skipped " + call);
+            continue;
           }
           String errorClass = null;
           String error = null;
@@ -2062,7 +1970,7 @@ public abstract class Server {
             if (e instanceof UndeclaredThrowableException) {
               e = e.getCause();
             }
-            String logMsg = getName() + ", call " + call + ": error: " + e;
+            String logMsg = Thread.currentThread().getName() + ", call " + call + ": error: " + e;
             if (e instanceof RuntimeException || e instanceof Error) {
               // These exception types indicate something is probably wrong
               // on the server side, as opposed to just a normal exceptional
@@ -2111,13 +2019,13 @@ public abstract class Server {
           }
         } catch (InterruptedException e) {
           if (running) {                          // unexpected -- log it
-            LOG.info(getName() + " unexpectedly interrupted", e);
+            LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
           }
         } catch (Exception e) {
-          LOG.info(getName() + " caught an exception", e);
+          LOG.info(Thread.currentThread().getName() + " caught an exception", e);
         }
       }
-      LOG.debug(getName() + ": exiting");
+      LOG.debug(Thread.currentThread().getName() + ": exiting");
     }
 
   }
@@ -2186,16 +2094,10 @@ public abstract class Server {
           CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY,
           CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_DEFAULT);
     }
+    this.readerPendingConnectionQueue = conf.getInt(
+        CommonConfigurationKeys.IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_KEY,
+        CommonConfigurationKeys.IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_DEFAULT);
     this.callQueue  = new LinkedBlockingQueue<Call>(maxQueueSize); 
-    this.maxIdleTime = 2 * conf.getInt(
-        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
-        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT);
-    this.maxConnectionsToNuke = conf.getInt(
-        CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY,
-        CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_DEFAULT);
-    this.thresholdIdleConnections = conf.getInt(
-        CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY,
-        CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_DEFAULT);
     this.secretManager = (SecretManager<TokenIdentifier>) secretManager;
     this.authorize = 
       conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, 
@@ -2208,7 +2110,8 @@ public abstract class Server {
     // Start the listener here and let it bind to the port
     listener = new Listener();
     this.port = listener.getAddress().getPort();    
-    this.rpcMetrics = RpcMetrics.create(this);
+    connectionManager = new ConnectionManager();
+    this.rpcMetrics = RpcMetrics.create(this, conf);
     this.rpcDetailedMetrics = RpcDetailedMetrics.create(this.port);
     this.tcpNoDelay = conf.getBoolean(
         CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY,
@@ -2274,11 +2177,7 @@ public abstract class Server {
   }
   
   private void closeConnection(Connection connection) {
-    synchronized (connectionList) {
-      if (connectionList.remove(connection))
-        numConnections--;
-    }
-    connection.close();
+    connectionManager.close(connection);
   }
   
   /**
@@ -2533,7 +2432,7 @@ public abstract class Server {
    * @return the number of open rpc connections
    */
   public int getNumOpenConnections() {
-    return numConnections;
+    return connectionManager.size();
   }
   
   /**
@@ -2643,4 +2542,151 @@ public abstract class Server {
     int nBytes = initialRemaining - buf.remaining(); 
     return (nBytes > 0) ? nBytes : ret;
   }
+  
+  private class ConnectionManager {
+    final private AtomicInteger count = new AtomicInteger();    
+    final private Set<Connection> connections;
+
+    final private Timer idleScanTimer;
+    final private int idleScanThreshold;
+    final private int idleScanInterval;
+    final private int maxIdleTime;
+    final private int maxIdleToClose;
+    
+    ConnectionManager() {
+      this.idleScanTimer = new Timer(
+          "IPC Server idle connection scanner for port " + getPort(), true);
+      this.idleScanThreshold = conf.getInt(
+          CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY,
+          CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_DEFAULT);
+      this.idleScanInterval = conf.getInt(
+          CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY,
+          CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_DEFAULT);
+      this.maxIdleTime = 2 * conf.getInt(
+          CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
+          CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT);
+      this.maxIdleToClose = conf.getInt(
+          CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY,
+          CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_DEFAULT);
+      // create a set with concurrency -and- a thread-safe iterator, add 2
+      // for listener and idle closer threads
+      this.connections = Collections.newSetFromMap(
+          new ConcurrentHashMap<Connection,Boolean>(
+              maxQueueSize, 0.75f, readThreads+2));
+    }
+
+    private boolean add(Connection connection) {
+      boolean added = connections.add(connection);
+      if (added) {
+        count.getAndIncrement();
+      }
+      return added;
+    }
+    
+    private boolean remove(Connection connection) {
+      boolean removed = connections.remove(connection);
+      if (removed) {
+        count.getAndDecrement();
+      }
+      return removed;
+    }
+    
+    int size() {
+      return count.get();
+    }
+
+    Connection[] toArray() {
+      return connections.toArray(new Connection[0]);
+    }
+
+    Connection register(SocketChannel channel) {
+      Connection connection = new Connection(channel, Time.now());
+      add(connection);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Server connection from " + connection +
+            "; # active connections: " + size() +
+            "; # queued calls: " + callQueue.size());
+      }      
+      return connection;
+    }
+    
+    boolean close(Connection connection) {
+      boolean exists = remove(connection);
+      if (exists) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(Thread.currentThread().getName() +
+              ": disconnecting client " + connection +
+              ". Number of active connections: "+ size());
+        }
+        // only close if actually removed to avoid double-closing due
+        // to possible races
+        connection.close();
+      }
+      return exists;
+    }
+    
+    // synch'ed to avoid explicit invocation upon OOM from colliding with
+    // timer task firing
+    synchronized void closeIdle(boolean scanAll) {
+      long minLastContact = Time.now() - maxIdleTime;
+      // concurrent iterator might miss new connections added
+      // during the iteration, but that's ok because they won't
+      // be idle yet anyway and will be caught on next scan
+      int closed = 0;
+      for (Connection connection : connections) {
+        // stop if connections dropped below threshold unless scanning all
+        if (!scanAll && size() < idleScanThreshold) {
+          break;
+        }
+        // stop if not scanning all and max connections are closed
+        if (connection.isIdle() &&
+            connection.getLastContact() < minLastContact &&
+            close(connection) &&
+            !scanAll && (++closed == maxIdleToClose)) {
+          break;
+        }
+      }
+    }
+    
+    void closeAll() {
+      // use a copy of the connections to be absolutely sure the concurrent
+      // iterator doesn't miss a connection
+      for (Connection connection : toArray()) {
+        close(connection);
+      }
+    }
+    
+    void startIdleScan() {
+      scheduleIdleScanTask();
+    }
+    
+    void stopIdleScan() {
+      idleScanTimer.cancel();
+    }
+    
+    private void scheduleIdleScanTask() {
+      if (!running) {
+        return;
+      }
+      TimerTask idleScanTask = new TimerTask(){
+        @Override
+        public void run() {
+          if (!running) {
+            return;
+          }
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(Thread.currentThread().getName()+": task running");
+          }
+          try {
+            closeIdle(false);
+          } finally {
+            // explicitly reschedule so next execution occurs relative
+            // to the end of this scan, not the beginning
+            scheduleIdleScanTask();
+          }
+        }
+      };
+      idleScanTimer.schedule(idleScanTask, idleScanInterval);
+    }
+  }
 }

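scheduleIdleScanTask above deliberately reschedules itself at the end of each run, so the next idle scan is measured from the end of the previous one instead of drifting on a fixed-rate timer. A standalone sketch of that pattern (the interval and the scan body are placeholders):

    import java.util.Timer;
    import java.util.TimerTask;

    final class SelfReschedulingScan {
      private final Timer timer = new Timer("idle-scan", true /* daemon */);
      private final long intervalMs;
      private volatile boolean running = true;

      SelfReschedulingScan(long intervalMs) {
        this.intervalMs = intervalMs;
      }

      void start() { schedule(); }

      void stop() {
        running = false;
        timer.cancel();
      }

      private void schedule() {
        if (!running) {
          return;
        }
        timer.schedule(new TimerTask() {
          @Override
          public void run() {
            try {
              // placeholder for the actual work, e.g. closing idle connections
            } finally {
              // reschedule relative to the end of this run, not its start
              schedule();
            }
          }
        }, intervalMs);
      }
    }
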
+ 40 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java

@@ -19,14 +19,17 @@ package org.apache.hadoop.ipc.metrics;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterInt;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 
 /**
@@ -41,26 +44,48 @@ public class RpcMetrics {
   final Server server;
   final MetricsRegistry registry;
   final String name;
+  final boolean rpcQuantileEnable;
   
-  RpcMetrics(Server server) {
+  RpcMetrics(Server server, Configuration conf) {
     String port = String.valueOf(server.getListenerAddress().getPort());
-    name = "RpcActivityForPort"+ port;
+    name = "RpcActivityForPort" + port;
     this.server = server;
     registry = new MetricsRegistry("rpc").tag("port", "RPC port", port);
-    LOG.debug("Initialized "+ registry);
+    int[] intervals = conf.getInts(
+        CommonConfigurationKeys.RPC_METRICS_PERCENTILES_INTERVALS_KEY);
+    rpcQuantileEnable = (intervals.length > 0) && conf.getBoolean(
+        CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE, false);
+    if (rpcQuantileEnable) {
+      rpcQueueTimeMillisQuantiles =
+          new MutableQuantiles[intervals.length];
+      rpcProcessingTimeMillisQuantiles =
+          new MutableQuantiles[intervals.length];
+      for (int i = 0; i < intervals.length; i++) {
+        int interval = intervals[i];
+        rpcQueueTimeMillisQuantiles[i] = registry.newQuantiles("rpcQueueTime"
+            + interval + "s", "rpc queue time in milli second", "ops",
+            "latency", interval);
+        rpcProcessingTimeMillisQuantiles[i] = registry.newQuantiles(
+            "rpcProcessingTime" + interval + "s",
+            "rpc processing time in milli second", "ops", "latency", interval);
+      }
+    }
+    LOG.debug("Initialized " + registry);
   }
 
   public String name() { return name; }
 
-  public static RpcMetrics create(Server server) {
-    RpcMetrics m = new RpcMetrics(server);
+  public static RpcMetrics create(Server server, Configuration conf) {
+    RpcMetrics m = new RpcMetrics(server, conf);
     return DefaultMetricsSystem.instance().register(m.name, null, m);
   }
 
   @Metric("Number of received bytes") MutableCounterLong receivedBytes;
   @Metric("Number of sent bytes") MutableCounterLong sentBytes;
   @Metric("Queue time") MutableRate rpcQueueTime;
+  MutableQuantiles[] rpcQueueTimeMillisQuantiles;
   @Metric("Processsing time") MutableRate rpcProcessingTime;
+  MutableQuantiles[] rpcProcessingTimeMillisQuantiles;
   @Metric("Number of authentication failures")
   MutableCounterInt rpcAuthenticationFailures;
   @Metric("Number of authentication successes")
@@ -146,6 +171,11 @@ public class RpcMetrics {
   //@Override
   public void addRpcQueueTime(int qTime) {
     rpcQueueTime.add(qTime);
+    if (rpcQuantileEnable) {
+      for (MutableQuantiles q : rpcQueueTimeMillisQuantiles) {
+        q.add(qTime);
+      }
+    }
   }
 
   /**
@@ -155,5 +185,10 @@ public class RpcMetrics {
   //@Override
   public void addRpcProcessingTime(int processingTime) {
     rpcProcessingTime.add(processingTime);
+    if (rpcQuantileEnable) {
+      for (MutableQuantiles q : rpcProcessingTimeMillisQuantiles) {
+        q.add(processingTime);
+      }
+    }
   }
 }

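The new quantile metrics stay disabled unless both the boolean switch and at least one rollover interval are configured. A hedged example of enabling them programmatically; the interval values here are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class EnableRpcQuantiles {
      public static Configuration withRpcQuantiles() {
        Configuration conf = new Configuration();
        // Both settings are required: the boolean switch and the list of
        // rollover intervals (in seconds) for the MutableQuantiles windows.
        conf.setBoolean(CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE, true);
        conf.set(CommonConfigurationKeys.RPC_METRICS_PERCENTILES_INTERVALS_KEY,
            "60,300");
        return conf;
      }
    }
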
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java

@@ -112,7 +112,7 @@ public abstract class AbstractPatternFilter extends MetricsFilter {
       return false;
     }
     // Reject if no match in whitelist only mode
-    if (ipat != null && epat == null) {
+    if (!includeTagPatterns.isEmpty() && excludeTagPatterns.isEmpty()) {
       return false;
     }
     return true;

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java

@@ -38,7 +38,7 @@ import static com.google.common.base.Preconditions.*;
 import org.apache.commons.configuration.PropertiesConfiguration;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.math.util.MathUtils;
+import org.apache.commons.math3.util.ArithmeticUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsCollector;
@@ -460,7 +460,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
       MetricsConfig conf = entry.getValue();
       int sinkPeriod = conf.getInt(PERIOD_KEY, PERIOD_DEFAULT);
       confPeriod = confPeriod == 0 ? sinkPeriod
-                                   : MathUtils.gcd(confPeriod, sinkPeriod);
+                                   : ArithmeticUtils.gcd(confPeriod, sinkPeriod);
       String clsName = conf.getClassName("");
       if (clsName == null) continue;  // sink can be registered later on
       String sinkName = entry.getKey();

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java

@@ -38,8 +38,8 @@ public enum DefaultMetricsSystem {
   private AtomicReference<MetricsSystem> impl =
       new AtomicReference<MetricsSystem>(new MetricsSystemImpl());
   volatile boolean miniClusterMode = false;
-  final UniqueNames mBeanNames = new UniqueNames();
-  final UniqueNames sourceNames = new UniqueNames();
+  transient final UniqueNames mBeanNames = new UniqueNames();
+  transient final UniqueNames sourceNames = new UniqueNames();
 
   /**
    * Convenience method to initialize the metrics system

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java

@@ -234,7 +234,7 @@
     patterns.
   </p>
   <p>Similarly, you can specify the <code>record.filter</code> and
-    <code>metrics.filter</code> options, which operate at record and metric
+    <code>metric.filter</code> options, which operate at record and metric
     level, respectively. Filters can be combined to optimize
     the filtering efficiency.</p>
 

+ 13 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java

@@ -24,10 +24,8 @@ import java.lang.management.MemoryUsage;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
 import java.lang.management.GarbageCollectorMXBean;
-import java.util.Map;
 import java.util.List;
-
-import com.google.common.collect.Maps;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.log.metrics.EventCounter;
@@ -67,7 +65,8 @@ public class JvmMetrics implements MetricsSource {
       ManagementFactory.getGarbageCollectorMXBeans();
   final ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
   final String processName, sessionId;
-  final Map<String, MetricsInfo[]> gcInfoCache = Maps.newHashMap();
+  final ConcurrentHashMap<String, MetricsInfo[]> gcInfoCache =
+      new ConcurrentHashMap<String, MetricsInfo[]>();
 
   JvmMetrics(String processName, String sessionId) {
     this.processName = processName;
@@ -101,8 +100,10 @@ public class JvmMetrics implements MetricsSource {
     Runtime runtime = Runtime.getRuntime();
     rb.addGauge(MemNonHeapUsedM, memNonHeap.getUsed() / M)
       .addGauge(MemNonHeapCommittedM, memNonHeap.getCommitted() / M)
+      .addGauge(MemNonHeapMaxM, memNonHeap.getMax() / M)
       .addGauge(MemHeapUsedM, memHeap.getUsed() / M)
       .addGauge(MemHeapCommittedM, memHeap.getCommitted() / M)
+      .addGauge(MemHeapMaxM, memHeap.getMax() / M)
       .addGauge(MemMaxM, runtime.maxMemory() / M);
   }
 
@@ -121,13 +122,17 @@ public class JvmMetrics implements MetricsSource {
       .addCounter(GcTimeMillis, timeMillis);
   }
 
-  private synchronized MetricsInfo[] getGcInfo(String gcName) {
+  private MetricsInfo[] getGcInfo(String gcName) {
     MetricsInfo[] gcInfo = gcInfoCache.get(gcName);
     if (gcInfo == null) {
       gcInfo = new MetricsInfo[2];
-      gcInfo[0] = Interns.info("GcCount"+ gcName, "GC Count for "+ gcName);
-      gcInfo[1] = Interns.info("GcTimeMillis"+ gcName, "GC Time for "+ gcName);
-      gcInfoCache.put(gcName, gcInfo);
+      gcInfo[0] = Interns.info("GcCount" + gcName, "GC Count for " + gcName);
+      gcInfo[1] = Interns
+          .info("GcTimeMillis" + gcName, "GC Time for " + gcName);
+      MetricsInfo[] previousGcInfo = gcInfoCache.putIfAbsent(gcName, gcInfo);
+      if (previousGcInfo != null) {
+        return previousGcInfo;
+      }
     }
     return gcInfo;
   }

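Dropping the synchronized keyword from getGcInfo is safe because the ConcurrentHashMap's putIfAbsent keeps whichever MetricsInfo array was published first when two threads race. The idiom in isolation (the key and value types here are simplified):

    import java.util.concurrent.ConcurrentHashMap;

    final class GcInfoCacheIdiom {
      private final ConcurrentHashMap<String, String[]> cache =
          new ConcurrentHashMap<String, String[]>();

      String[] get(String gcName) {
        String[] info = cache.get(gcName);
        if (info == null) {
          // Build a candidate without holding a lock; if another thread won
          // the race, discard ours and use the previously published value.
          String[] candidate = new String[] {
              "GcCount" + gcName, "GcTimeMillis" + gcName };
          String[] previous = cache.putIfAbsent(gcName, candidate);
          info = (previous != null) ? previous : candidate;
        }
        return info;
      }
    }
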
+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java

@@ -32,8 +32,10 @@ public enum JvmMetricsInfo implements MetricsInfo {
   // metrics
   MemNonHeapUsedM("Non-heap memory used in MB"),
   MemNonHeapCommittedM("Non-heap memory committed in MB"),
+  MemNonHeapMaxM("Non-heap memory max in MB"),
   MemHeapUsedM("Heap memory used in MB"),
   MemHeapCommittedM("Heap memory committed in MB"),
+  MemHeapMaxM("Heap memory max in MB"),
   MemMaxM("Max memory size in MB"),
   GcCount("Total GC count"),
   GcTimeMillis("Total GC time in milliseconds"),

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java

@@ -154,4 +154,11 @@ public class CachedDNSToSwitchMapping extends AbstractDNSToSwitchMapping {
   public void reloadCachedMappings() {
     cache.clear();
   }
+
+  @Override
+  public void reloadCachedMappings(List<String> names) {
+    for (String name : names) {
+      cache.remove(name);
+    }
+  }
 }

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java

@@ -59,4 +59,12 @@ public interface DNSToSwitchMapping {
    * will get a chance to see the new data.
    */
   public void reloadCachedMappings();
+  
+  /**
+   * Reload cached mappings on specific nodes.
+   *
+   * If there is a cache on these nodes, this method will clear it, so that 
+   * future accesses will see updated data.
+   */
+  public void reloadCachedMappings(List<String> names);
 }

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java

@@ -269,5 +269,11 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
       // Nothing to do here, since RawScriptBasedMapping has no cache, and
       // does not inherit from CachedDNSToSwitchMapping
     }
+
+    @Override
+    public void reloadCachedMappings(List<String> names) {
+      // Nothing to do here, since RawScriptBasedMapping has no cache, and
+      // does not inherit from CachedDNSToSwitchMapping
+    }
   }
 }

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java

@@ -162,5 +162,12 @@ public class TableMapping extends CachedDNSToSwitchMapping {
         }
       }
     }
+
+    @Override
+    public void reloadCachedMappings(List<String> names) {
+      // TableMapping has to reload all mappings at once, so there is no way
+      // to reload mappings for specific nodes only
+      reloadCachedMappings();
+    }
   }
 }

Some files were not shown because too many files changed in this diff