
Merge r1346682 through r1354801 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3092@1354832 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 13 years ago
parent
commit
72d06c332d
100 changed files with 3549 additions and 2463 deletions
  1. +2 -2  dev-support/test-patch.sh
  2. +60 -0  hadoop-assemblies/src/main/resources/assemblies/hadoop-raid-dist.xml
  3. +67 -0  hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
  4. +18 -0  hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceAudience.java
  5. +12 -1  hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceStability.java
  6. +1 -0  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
  7. +38 -33  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
  8. +28 -0  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
  9. +5 -5  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
  10. +21 -0  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
  11. +21 -0  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
  12. +5 -1  hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java
  13. +154 -38  hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
  14. +81 -40  hadoop-common-project/hadoop-common/CHANGES.txt
  15. +18 -85  hadoop-common-project/hadoop-common/pom.xml
  16. +126 -0  hadoop-common-project/hadoop-common/src/CMakeLists.txt
  17. +10 -0  hadoop-common-project/hadoop-common/src/config.h.cmake
  18. +1 -1  hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
  19. +32 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  20. +3 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  21. +2 -3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
  22. +8 -8  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
  23. +4 -4  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
  24. +20 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  25. +33 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
  26. +28 -4  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
  27. +8 -9  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
  28. +230 -12  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
  29. +6 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java
  30. +7 -4  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java
  31. +83 -39  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  32. +19 -9  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
  33. +11 -8  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
  34. +3 -2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
  35. +3 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java
  36. +7 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
  37. +142 -29  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
  38. +398 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
  39. +20 -4  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java
  40. +11 -2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
  41. +0 -42  hadoop-common-project/hadoop-common/src/main/native/.autom4te.cfg
  42. +0 -66  hadoop-common-project/hadoop-common/src/main/native/Makefile.am
  43. +0 -28  hadoop-common-project/hadoop-common/src/main/native/acinclude.m4
  44. +0 -130  hadoop-common-project/hadoop-common/src/main/native/configure.ac
  45. +0 -47  hadoop-common-project/hadoop-common/src/main/native/lib/Makefile.am
  46. +1 -4  hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
  47. +1 -4  hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
  48. +5 -31  hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
  49. +5 -31  hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
  50. +6 -35  hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h
  51. +0 -53  hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/Makefile.am
  52. +5 -27  hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
  53. +5 -27  hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
  54. +6 -33  hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h
  55. +1 -3  hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
  56. +1 -3  hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
  57. +3 -14  hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h
  58. +1 -1  hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
  59. +9 -3  hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  60. +20 -0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
  61. +13 -1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
  62. +43 -1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
  63. +9 -9  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
  64. +18 -12  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
  65. +43 -0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
  66. +38 -15  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
  67. +86 -2  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java
  68. +6 -0  hadoop-dist/pom.xml
  69. +82 -67  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
  70. +3 -4  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
  71. +6 -0  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
  72. +398 -0  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
  73. +0 -551  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java
  74. +325 -359  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
  75. +3 -4  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java
  76. +2 -2  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java
  77. +2 -2  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java
  78. +2 -2  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java
  79. +2 -2  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java
  80. +15 -8  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java
  81. +27 -28  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
  82. +107 -0  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java
  83. +2 -2  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java
  84. +9 -13  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java
  85. +1 -0  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
  86. +9 -9  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
  87. +0 -50  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestBooleanParam.java
  88. +0 -53  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestByteParam.java
  89. +0 -52  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestIntegerParam.java
  90. +0 -47  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestLongParam.java
  91. +120 -0  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
  92. +0 -53  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestShortParam.java
  93. +0 -64  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestStringParam.java
  94. +22 -0  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/httpfs-log4j.properties
  95. +170 -0  hadoop-hdfs-project/hadoop-hdfs-raid/pom.xml
  96. +0 -0  hadoop-hdfs-project/hadoop-hdfs-raid/src/main/conf/raid.xml
  97. +0 -0  hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/DistributedRaidFileSystem.java
  98. +0 -0  hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/RaidDFSUtil.java
  99. +0 -0  hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java
  100. +171 -123  hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java

+ 2 - 2
dev-support/test-patch.sh

@@ -418,8 +418,8 @@ checkJavacWarnings () {
   echo "======================================================================"
   echo ""
   echo ""
-  echo "$MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1"
-  $MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1
+  echo "$MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Pnative -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1"
+  $MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Pnative -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1
   if [[ $? != 0 ]] ; then
     JIRA_COMMENT="$JIRA_COMMENT
 

+ 60 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-raid-dist.xml

@@ -0,0 +1,60 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<assembly>
+  <id>hadoop-raid-dist</id>
+  <formats>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <fileSets>
+    <!-- Configuration files -->
+    <fileSet>
+      <directory>${basedir}/src/main/conf</directory>
+      <outputDirectory>/etc/hadoop</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/sbin</directory>
+      <outputDirectory>/sbin</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/libexec</directory>
+      <outputDirectory>/libexec</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <!-- Documentation -->
+    <fileSet>
+      <directory>${project.build.directory}/site</directory>
+      <outputDirectory>/share/doc/hadoop/raid</outputDirectory>
+    </fileSet>
+  </fileSets>
+  <dependencySets>
+    <dependencySet>
+      <outputDirectory>/share/hadoop/${hadoop.component}/lib</outputDirectory>
+      <unpack>false</unpack>
+      <scope>runtime</scope>
+      <useProjectArtifact>true</useProjectArtifact>
+    </dependencySet>
+  </dependencySets>
+</assembly>

+ 67 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml

@@ -0,0 +1,67 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  <id>hadoop-tools</id>
+  <formats>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <fileSets>
+    <fileSet>
+      <directory>../hadoop-pipes/src/main/native/pipes/api/hadoop</directory>
+      <includes>
+        <include>*.hh</include>
+      </includes>
+      <outputDirectory>/include</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>../hadoop-pipes/src/main/native/utils/api/hadoop</directory>
+      <includes>
+        <include>*.hh</include>
+      </includes>
+      <outputDirectory>/include</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>../hadoop-pipes/target/native</directory>
+      <includes>
+        <include>*.a</include>
+      </includes>
+      <outputDirectory>lib/native</outputDirectory>
+    </fileSet>
+  </fileSets>
+  <dependencySets>
+    <dependencySet>
+      <outputDirectory>/share/hadoop/${hadoop.component}/lib</outputDirectory>
+      <unpack>false</unpack>
+      <scope>runtime</scope>
+      <useProjectArtifact>false</useProjectArtifact>
+      <!-- Exclude hadoop artifacts. They will be found via HADOOP* env -->
+      <excludes>
+        <exclude>org.apache.hadoop:hadoop-common</exclude>
+        <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
+        <exclude>org.apache.hadoop:hadoop-mapreduce</exclude>
+        <!-- pipes is native stuff, this just keeps pom from being package-->
+        <exclude>org.apache.hadoop:hadoop-pipes</exclude>
+        <!-- use slf4j from common to avoid multiple binding warnings -->
+        <exclude>org.slf4j:slf4j-api</exclude>
+        <exclude>org.slf4j:slf4j-log4j12</exclude>
+      </excludes>
+    </dependencySet>
+  </dependencySets>
+</assembly>

+ 18 - 0
hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceAudience.java

@@ -21,6 +21,24 @@ import java.lang.annotation.Documented;
 
 /**
  * Annotation to inform users of a package, class or method's intended audience.
+ * Currently the audience can be {@link Public}, {@link LimitedPrivate} or
+ * {@link Private}. <br>
+ * All public classes must have InterfaceAudience annotation. <br>
+ * <ul>
+ * <li>Public classes that are not marked with this annotation must be
+ * considered by default as {@link Private}.</li> 
+ * 
+ * <li>External applications must only use classes that are marked
+ * {@link Public}. Avoid using non public classes as these classes
+ * could be removed or change in incompatible ways.</li>
+ * 
+ * <li>Hadoop projects must only use classes that are marked
+ * {@link LimitedPrivate} or {@link Public}</li>
+ * 
+ * <li> Methods may have a different annotation that it is more restrictive
+ * compared to the audience classification of the class. Example: A class 
+ * might be {@link Public}, but a method may be {@link LimitedPrivate}
+ * </li></ul>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving

+ 12 - 1
hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceStability.java

@@ -19,9 +19,20 @@ package org.apache.hadoop.classification;
 
 import java.lang.annotation.Documented;
 
+import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+
 /**
  * Annotation to inform users of how much to rely on a particular package,
- * class or method not changing over time.
+ * class or method not changing over time. Currently the stability can be
+ * {@link Stable}, {@link Evolving} or {@link Unstable}. <br>
+ * 
+ * <ul><li>All classes that are annotated with {@link Public} or
+ * {@link LimitedPrivate} must have InterfaceStability annotation. </li>
+ * <li>Classes that are {@link Private} are to be considered unstable unless
+ * a different InterfaceStability annotation states otherwise.</li>
+ * <li>Incompatible changes must not be made to classes marked as stable.</li>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
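
The two javadoc additions above (HADOOP-8059) spell out how the audience and stability annotations combine. As a hedged illustration only -- ExampleRecord and internalHook are hypothetical names, not part of this commit -- a class following those rules would look like:

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;

    @InterfaceAudience.Public    // external applications may rely on this class
    @InterfaceStability.Stable   // incompatible changes must not be made
    public class ExampleRecord {

      // A method may carry a more restrictive annotation than its class:
      // the class is Public, but this hook is for the named projects only.
      @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
      @InterfaceStability.Unstable
      public void internalHook() {
        // intentionally empty
      }
    }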

+ 1 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java

@@ -266,6 +266,7 @@ public class AuthenticatedURL {
         }
       }
     } else {
+      token.set(null);
       throw new AuthenticationException("Authentication failed, status: " + conn.getResponseCode() +
                                         ", message: " + conn.getResponseMessage());
     }
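
This one-line fix (HADOOP-8512 in CHANGES.txt below) clears the client-side token before the exception propagates, so a retry starts from scratch instead of replaying a rejected token. A minimal caller sketch, assuming a hypothetical service URL:

    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
    import org.apache.hadoop.security.authentication.client.AuthenticationException;

    public class AuthExample {
      public static void main(String[] args) throws Exception {
        AuthenticatedURL.Token token = new AuthenticatedURL.Token();
        try {
          HttpURLConnection conn = new AuthenticatedURL()
              .openConnection(new URL("http://host:8080/service"), token);
          System.out.println("HTTP " + conn.getResponseCode());
        } catch (AuthenticationException ex) {
          // With the fix above, token.isSet() is false here: a rejected
          // exchange no longer leaves a stale token behind for the retry.
        }
      }
    }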

+ 38 - 33
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java

@@ -341,45 +341,50 @@ public class AuthenticationFilter implements Filter {
         LOG.warn("AuthenticationToken ignored: " + ex.getMessage());
         token = null;
       }
-      if (token == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Request [{}] triggering authentication", getRequestURL(httpRequest));
-        }
-        token = authHandler.authenticate(httpRequest, httpResponse);
-        if (token != null && token != AuthenticationToken.ANONYMOUS) {
-          token.setExpires(System.currentTimeMillis() + getValidity() * 1000);
-        }
-        newToken = true;
-      }
-      if (token != null) {
-        unauthorizedResponse = false;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Request [{}] user [{}] authenticated", getRequestURL(httpRequest), token.getUserName());
+      if (authHandler.managementOperation(token, httpRequest, httpResponse)) {
+        if (token == null) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Request [{}] triggering authentication", getRequestURL(httpRequest));
+          }
+          token = authHandler.authenticate(httpRequest, httpResponse);
+          if (token != null && token.getExpires() != 0 &&
+              token != AuthenticationToken.ANONYMOUS) {
+            token.setExpires(System.currentTimeMillis() + getValidity() * 1000);
+          }
+          newToken = true;
         }
-        final AuthenticationToken authToken = token;
-        httpRequest = new HttpServletRequestWrapper(httpRequest) {
-
-          @Override
-          public String getAuthType() {
-            return authToken.getType();
+        if (token != null) {
+          unauthorizedResponse = false;
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Request [{}] user [{}] authenticated", getRequestURL(httpRequest), token.getUserName());
           }
+          final AuthenticationToken authToken = token;
+          httpRequest = new HttpServletRequestWrapper(httpRequest) {
 
-          @Override
-          public String getRemoteUser() {
-            return authToken.getUserName();
-          }
+            @Override
+            public String getAuthType() {
+              return authToken.getType();
+            }
 
-          @Override
-          public Principal getUserPrincipal() {
-            return (authToken != AuthenticationToken.ANONYMOUS) ? authToken : null;
+            @Override
+            public String getRemoteUser() {
+              return authToken.getUserName();
+            }
+
+            @Override
+            public Principal getUserPrincipal() {
+              return (authToken != AuthenticationToken.ANONYMOUS) ? authToken : null;
+            }
+          };
+          if (newToken && !token.isExpired() && token != AuthenticationToken.ANONYMOUS) {
+            String signedToken = signer.sign(token.toString());
+            Cookie cookie = createCookie(signedToken);
+            httpResponse.addCookie(cookie);
           }
-        };
-        if (newToken && token != AuthenticationToken.ANONYMOUS) {
-          String signedToken = signer.sign(token.toString());
-          Cookie cookie = createCookie(signedToken);
-          httpResponse.addCookie(cookie);
+          filterChain.doFilter(httpRequest, httpResponse);
         }
-        filterChain.doFilter(httpRequest, httpResponse);
+      } else {
+        unauthorizedResponse = false;
       }
     } catch (AuthenticationException ex) {
       unauthorizedMsg = ex.toString();

+ 28 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java

@@ -58,6 +58,34 @@ public interface AuthenticationHandler {
    */
   public void destroy();
 
+  /**
+   * Performs an authentication management operation.
+   * <p/>
+   * This is useful for handling operations like get/renew/cancel
+   * delegation tokens which are being handled as operations of the
+   * service end-point.
+   * <p/>
+   * If the method returns <code>TRUE</code> the request will continue normal
+   * processing, this means the method has not produced any HTTP response.
+   * <p/>
+   * If the method returns <code>FALSE</code> the request will end, this means 
+   * the method has produced the corresponding HTTP response.
+   *
+   * @param token the authentication token if any, otherwise <code>NULL</code>.
+   * @param request the HTTP client request.
+   * @param response the HTTP client response.
+   * @return <code>TRUE</code> if the request should be processed as a regular
+   * request,
+   * <code>FALSE</code> otherwise.
+   *
+   * @throws IOException thrown if an IO error occurred.
+   * @throws AuthenticationException thrown if an Authentication error occurred.
+   */
+  public boolean managementOperation(AuthenticationToken token,
+                                     HttpServletRequest request,
+                                     HttpServletResponse response)
+    throws IOException, AuthenticationException;
+
   /**
    * Performs an authentication step for the given HTTP client request.
    * <p/>
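
The contract described in this javadoc is what enables delegation-token endpoints: the handler can answer a management request itself and stop the filter chain. A hedged sketch of a handler using the hook -- the "op" parameter and its value are illustrative, not defined by this commit:

    import java.io.IOException;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;
    import org.apache.hadoop.security.authentication.client.AuthenticationException;
    import org.apache.hadoop.security.authentication.server.AuthenticationToken;
    import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;

    public class TokenOpsHandler extends PseudoAuthenticationHandler {

      @Override
      public boolean managementOperation(AuthenticationToken token,
                                         HttpServletRequest request,
                                         HttpServletResponse response)
          throws IOException, AuthenticationException {
        // Hypothetical management operation keyed off a query parameter.
        if ("GETDELEGATIONTOKEN".equals(request.getParameter("op"))) {
          if (token == null) {
            throw new AuthenticationException("authentication required");
          }
          response.setStatus(HttpServletResponse.SC_OK);
          response.getWriter().write("{}");  // this method produced the response
          return false;                      // FALSE: request ends, chain not invoked
        }
        return true;  // TRUE: continue regular request processing
      }
    }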

+ 5 - 5
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java

@@ -115,10 +115,10 @@ public class AuthenticationToken implements Principal {
    */
   private void generateToken() {
     StringBuffer sb = new StringBuffer();
-    sb.append(USER_NAME).append("=").append(userName).append(ATTR_SEPARATOR);
-    sb.append(PRINCIPAL).append("=").append(principal).append(ATTR_SEPARATOR);
-    sb.append(TYPE).append("=").append(type).append(ATTR_SEPARATOR);
-    sb.append(EXPIRES).append("=").append(expires);
+    sb.append(USER_NAME).append("=").append(getUserName()).append(ATTR_SEPARATOR);
+    sb.append(PRINCIPAL).append("=").append(getName()).append(ATTR_SEPARATOR);
+    sb.append(TYPE).append("=").append(getType()).append(ATTR_SEPARATOR);
+    sb.append(EXPIRES).append("=").append(getExpires());
     token = sb.toString();
   }
 
@@ -165,7 +165,7 @@ public class AuthenticationToken implements Principal {
    * @return if the token has expired.
    */
   public boolean isExpired() {
-    return expires != -1 && System.currentTimeMillis() > expires;
+    return getExpires() != -1 && System.currentTimeMillis() > getExpires();
   }
 
   /**
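
Routing generateToken() and isExpired() through getters instead of the private fields looks cosmetic, but it lets a subclass change what those methods observe by overriding the getters -- plausibly groundwork for the delegation-token work listed in CHANGES.txt (HADOOP-8458). A hedged sketch of what this enables (NonExpiringToken is hypothetical, and assumes getExpires() is not final):

    import org.apache.hadoop.security.authentication.server.AuthenticationToken;

    public class NonExpiringToken extends AuthenticationToken {

      public NonExpiringToken(String userName, String principal, String type) {
        super(userName, principal, type);
      }

      // isExpired() now goes through getExpires(), so overriding the getter is
      // enough: -1 is treated as "never expires" by the base implementation.
      @Override
      public long getExpires() {
        return -1;
      }
    }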

+ 21 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java

@@ -232,6 +232,27 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
     return keytab;
   }
 
+  /**
+   * This is an empty implementation, it always returns <code>TRUE</code>.
+   *
+   *
+   *
+   * @param token the authentication token if any, otherwise <code>NULL</code>.
+   * @param request the HTTP client request.
+   * @param response the HTTP client response.
+   *
+   * @return <code>TRUE</code>
+   * @throws IOException it is never thrown.
+   * @throws AuthenticationException it is never thrown.
+   */
+  @Override
+  public boolean managementOperation(AuthenticationToken token,
+                                     HttpServletRequest request,
+                                     HttpServletResponse response)
+    throws IOException, AuthenticationException {
+    return true;
+  }
+
   /**
    * It enforces the the Kerberos SPNEGO authentication sequence returning an {@link AuthenticationToken} only
    * after the Kerberos SPNEGO sequence has completed successfully.

+ 21 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java

@@ -93,6 +93,27 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
     return TYPE;
   }
 
+  /**
+   * This is an empty implementation, it always returns <code>TRUE</code>.
+   *
+   *
+   *
+   * @param token the authentication token if any, otherwise <code>NULL</code>.
+   * @param request the HTTP client request.
+   * @param response the HTTP client response.
+   *
+   * @return <code>TRUE</code>
+   * @throws IOException it is never thrown.
+   * @throws AuthenticationException it is never thrown.
+   */
+  @Override
+  public boolean managementOperation(AuthenticationToken token,
+                                     HttpServletRequest request,
+                                     HttpServletResponse response)
+    throws IOException, AuthenticationException {
+    return true;
+  }
+
   /**
    * Authenticates an HTTP client request.
    * <p/>

+ 5 - 1
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java

@@ -13,6 +13,7 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
+import junit.framework.Assert;
 import junit.framework.TestCase;
 import org.mockito.Mockito;
 
@@ -100,11 +101,14 @@ public class TestAuthenticatedURL extends TestCase {
     headers.put("Set-Cookie", cookies);
     Mockito.when(conn.getHeaderFields()).thenReturn(headers);
 
+    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+    token.set("bar");
     try {
-      AuthenticatedURL.extractToken(conn, new AuthenticatedURL.Token());
+      AuthenticatedURL.extractToken(conn, token);
       fail();
     } catch (AuthenticationException ex) {
       // Expected
+      Assert.assertFalse(token.isSet());
     } catch (Exception ex) {
       fail();
     }

+ 154 - 38
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java

@@ -71,7 +71,9 @@ public class TestAuthenticationFilter extends TestCase {
 
   public static class DummyAuthenticationHandler implements AuthenticationHandler {
     public static boolean init;
+    public static boolean managementOperationReturn;
     public static boolean destroy;
+    public static boolean expired;
 
     public static final String TYPE = "dummy";
 
@@ -83,6 +85,20 @@ public class TestAuthenticationFilter extends TestCase {
     @Override
     public void init(Properties config) throws ServletException {
       init = true;
+      managementOperationReturn =
+        config.getProperty("management.operation.return", "true").equals("true");
+      expired = config.getProperty("expired.token", "false").equals("true");
+    }
+
+    @Override
+    public boolean managementOperation(AuthenticationToken token,
+                                       HttpServletRequest request,
+                                       HttpServletResponse response)
+      throws IOException, AuthenticationException {
+      if (!managementOperationReturn) {
+        response.setStatus(HttpServletResponse.SC_ACCEPTED);
+      }
+      return managementOperationReturn;
     }
 
     @Override
@@ -102,7 +118,7 @@ public class TestAuthenticationFilter extends TestCase {
       String param = request.getParameter("authenticated");
       if (param != null && param.equals("true")) {
         token = new AuthenticationToken("u", "p", "t");
-        token.setExpires(System.currentTimeMillis() + 1000);
+        token.setExpires((expired) ? 0 : System.currentTimeMillis() + 1000);
       } else {
         response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
       }
@@ -170,10 +186,14 @@ public class TestAuthenticationFilter extends TestCase {
     filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
       filter.init(config);
       assertTrue(DummyAuthenticationHandler.init);
     } finally {
@@ -201,10 +221,14 @@ public class TestAuthenticationFilter extends TestCase {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -221,12 +245,16 @@ public class TestAuthenticationFilter extends TestCase {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
-                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        AuthenticationFilter.SIGNATURE_SECRET,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
@@ -250,12 +278,15 @@ public class TestAuthenticationFilter extends TestCase {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
-                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        AuthenticationFilter.SIGNATURE_SECRET,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
@@ -284,12 +315,16 @@ public class TestAuthenticationFilter extends TestCase {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
-                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        AuthenticationFilter.SIGNATURE_SECRET,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
@@ -318,10 +353,14 @@ public class TestAuthenticationFilter extends TestCase {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -349,10 +388,16 @@ public class TestAuthenticationFilter extends TestCase {
     }
   }
 
-  private void _testDoFilterAuthentication(boolean withDomainPath, boolean invalidToken) throws Exception {
+  private void _testDoFilterAuthentication(boolean withDomainPath,
+                                           boolean invalidToken,
+                                           boolean expired) throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
+      Mockito.when(config.getInitParameter("expired.token")).
+        thenReturn(Boolean.toString(expired));
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn("1000");
@@ -360,7 +405,9 @@ public class TestAuthenticationFilter extends TestCase {
       Mockito.when(config.getInitParameterNames()).thenReturn(
         new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                  AuthenticationFilter.AUTH_TOKEN_VALIDITY,
-                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
+                                 AuthenticationFilter.SIGNATURE_SECRET,
+                                 "management.operation.return",
+                                 "expired.token")).elements());
 
       if (withDomainPath) {
         Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com");
@@ -370,7 +417,8 @@ public class TestAuthenticationFilter extends TestCase {
                                    AuthenticationFilter.AUTH_TOKEN_VALIDITY,
                                    AuthenticationFilter.SIGNATURE_SECRET,
                                    AuthenticationFilter.COOKIE_DOMAIN,
-                                   AuthenticationFilter.COOKIE_PATH)).elements());
+                                   AuthenticationFilter.COOKIE_PATH,
+                                   "management.operation.return")).elements());
       }
 
       filter.init(config);
@@ -416,26 +464,32 @@ public class TestAuthenticationFilter extends TestCase {
 
       filter.doFilter(request, response, chain);
 
-      assertNotNull(setCookie[0]);
-      assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName());
-      assertTrue(setCookie[0].getValue().contains("u="));
-      assertTrue(setCookie[0].getValue().contains("p="));
-      assertTrue(setCookie[0].getValue().contains("t="));
-      assertTrue(setCookie[0].getValue().contains("e="));
-      assertTrue(setCookie[0].getValue().contains("s="));
-      assertTrue(calledDoFilter[0]);
-
-      Signer signer = new Signer("secret".getBytes());
-      String value = signer.verifyAndExtract(setCookie[0].getValue());
-      AuthenticationToken token = AuthenticationToken.parse(value);
-      assertEquals(System.currentTimeMillis() + 1000 * 1000, token.getExpires(), 100);
-
-      if (withDomainPath) {
-        assertEquals(".foo.com", setCookie[0].getDomain());
-        assertEquals("/bar", setCookie[0].getPath());
+      if (expired) {
+        Mockito.verify(response, Mockito.never()).
+          addCookie(Mockito.any(Cookie.class));
       } else {
-        assertNull(setCookie[0].getDomain());
-        assertNull(setCookie[0].getPath());
+        assertNotNull(setCookie[0]);
+        assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName());
+        assertTrue(setCookie[0].getValue().contains("u="));
+        assertTrue(setCookie[0].getValue().contains("p="));
+        assertTrue(setCookie[0].getValue().contains("t="));
+        assertTrue(setCookie[0].getValue().contains("e="));
+        assertTrue(setCookie[0].getValue().contains("s="));
+        assertTrue(calledDoFilter[0]);
+
+        Signer signer = new Signer("secret".getBytes());
+        String value = signer.verifyAndExtract(setCookie[0].getValue());
+        AuthenticationToken token = AuthenticationToken.parse(value);
+        assertEquals(System.currentTimeMillis() + 1000 * 1000,
+                     token.getExpires(), 100);
+
+        if (withDomainPath) {
+          assertEquals(".foo.com", setCookie[0].getDomain());
+          assertEquals("/bar", setCookie[0].getPath());
+        } else {
+          assertNull(setCookie[0].getDomain());
+          assertNull(setCookie[0].getPath());
+        }
       }
     } finally {
       filter.destroy();
@@ -443,25 +497,33 @@ public class TestAuthenticationFilter extends TestCase {
   }
 
   public void testDoFilterAuthentication() throws Exception {
-    _testDoFilterAuthentication(false, false);
+    _testDoFilterAuthentication(false, false, false);
+  }
+
+  public void testDoFilterAuthenticationImmediateExpiration() throws Exception {
+    _testDoFilterAuthentication(false, false, true);
   }
 
   public void testDoFilterAuthenticationWithInvalidToken() throws Exception {
-    _testDoFilterAuthentication(false, true);
+    _testDoFilterAuthentication(false, true, false);
   }
 
   public void testDoFilterAuthenticationWithDomainPath() throws Exception {
-    _testDoFilterAuthentication(true, false);
+    _testDoFilterAuthentication(true, false, false);
   }
 
   public void testDoFilterAuthenticated() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -503,10 +565,14 @@ public class TestAuthenticationFilter extends TestCase {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -563,10 +629,14 @@ public class TestAuthenticationFilter extends TestCase {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -618,4 +688,50 @@ public class TestAuthenticationFilter extends TestCase {
     }
   }
 
+  public void testManagementOperation() throws Exception {
+    AuthenticationFilter filter = new AuthenticationFilter();
+    try {
+      FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("false");
+      Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).
+        thenReturn(DummyAuthenticationHandler.class.getName());
+      Mockito.when(config.getInitParameterNames()).thenReturn(
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
+      filter.init(config);
+
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      Mockito.when(request.getRequestURL()).
+        thenReturn(new StringBuffer("http://foo:8080/bar"));
+
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+
+      FilterChain chain = Mockito.mock(FilterChain.class);
+
+      filter.doFilter(request, response, chain);
+      Mockito.verify(response).setStatus(HttpServletResponse.SC_ACCEPTED);
+      Mockito.verifyNoMoreInteractions(response);
+
+      Mockito.reset(request);
+      Mockito.reset(response);
+
+      AuthenticationToken token = new AuthenticationToken("u", "p", "t");
+      token.setExpires(System.currentTimeMillis() + 1000);
+      Signer signer = new Signer("secret".getBytes());
+      String tokenSigned = signer.sign(token.toString());
+      Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
+      Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
+
+      filter.doFilter(request, response, chain);
+
+      Mockito.verify(response).setStatus(HttpServletResponse.SC_ACCEPTED);
+      Mockito.verifyNoMoreInteractions(response);
+
+    } finally {
+      filter.destroy();
+    }
+  }
+
 }

+ 81 - 40
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -9,11 +9,11 @@ Trunk (unreleased changes)
 
   NEW FEATURES
     
-    HADOOP-8135. Add ByteBufferReadable interface to FSDataInputStream. (Henry
-    Robinson via atm)
+    HADOOP-8469. Make NetworkTopology class pluggable.  (Junping Du via
+    szetszwo)
 
-    HDFS-3042. Automatic failover support for NameNode HA (todd)
-    (see dedicated section below for breakdown of subtasks)
+    HADOOP-8470. Add NetworkTopologyWithNodeGroup, a 4-layer implementation
+    of NetworkTopology.  (Junping Du via szetszwo)
 
   IMPROVEMENTS
 
@@ -55,9 +55,6 @@ Trunk (unreleased changes)
     HADOOP-7994. Remove getProtocolVersion and getProtocolSignature from the 
     client side translator and server side implementation. (jitendra)
 
-    HADOOP-8244. Improve comments on ByteBufferReadable.read. (Henry Robinson
-    via atm)
-
     HADOOP-7757. Test file reference count is at least 3x actual value (Jon
     Eagles via bobby)
 
@@ -82,6 +79,9 @@ Trunk (unreleased changes)
     HADOOP-7659. fs -getmerge isn't guaranteed to work well over non-HDFS
     filesystems (harsh)
 
+    HADOOP-8059. Add javadoc to InterfaceAudience and InterfaceStability.
+    (Brandon Li via suresh)
+
   BUG FIXES
 
     HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
@@ -165,39 +165,7 @@ Trunk (unreleased changes)
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
 
-  BREAKDOWN OF HDFS-3042 SUBTASKS
-
-    HADOOP-8220. ZKFailoverController doesn't handle failure to become active
-    correctly (todd)
-    
-    HADOOP-8228. Auto HA: Refactor tests and add stress tests. (todd)
-    
-    HADOOP-8215. Security support for ZK Failover controller (todd)
-    
-    HADOOP-8245. Fix flakiness in TestZKFailoverController (todd)
-    
-    HADOOP-8257. TestZKFailoverControllerStress occasionally fails with Mockito
-    error (todd)
-    
-    HADOOP-8260. Replace ClientBaseWithFixes with our own modified copy of the
-    class (todd)
-    
-    HADOOP-8246. Auto-HA: automatically scope znode by nameservice ID (todd)
-    
-    HADOOP-8247. Add a config to enable auto-HA, which disables manual
-    FailoverController (todd)
-    
-    HADOOP-8306. ZKFC: improve error message when ZK is not running. (todd)
-    
-    HADOOP-8279. Allow manual failover to be invoked when auto-failover is
-    enabled. (todd)
-    
-    HADOOP-8276. Auto-HA: add config for java options to pass to zkfc daemon
-    (todd via eli)
-    
-    HADOOP-8405. ZKFC tests leak ZK instances. (todd)
-
-Release 2.0.1-alpha - UNRELEASED
+Branch-2 ( Unreleased changes )
 
   INCOMPATIBLE CHANGES
 
@@ -206,6 +174,17 @@ Release 2.0.1-alpha - UNRELEASED
 
   NEW FEATURES
  
+    HDFS-3042. Automatic failover support for NameNode HA (todd)
+    (see dedicated section below for breakdown of subtasks)
+
+    HADOOP-8135. Add ByteBufferReadable interface to FSDataInputStream. (Henry
+    Robinson via atm)
+
+    HADOOP-8458. Add management hook to AuthenticationHandler to enable 
+    delegation token operations support (tucu)
+
+    HADOOP-8465. hadoop-auth should support ephemeral authentication (tucu)
+
   IMPROVEMENTS
 
     HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual
@@ -228,6 +207,14 @@ Release 2.0.1-alpha - UNRELEASED
 
     HADOOP-8450. Remove src/test/system. (eli)
 
+    HADOOP-8244. Improve comments on ByteBufferReadable.read. (Henry Robinson
+    via atm)
+
+    HADOOP-8368. Use CMake rather than autotools to build native code (ccccabe via tucu)
+
+    HADOOP-8524. Allow users to get source of a Configuration
+    parameter (harsh)
+
   BUG FIXES
 
     HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
@@ -267,6 +254,54 @@ Release 2.0.1-alpha - UNRELEASED
     HADOOP-8481. update BUILDING.txt to talk about cmake rather than autotools.
     (Colin Patrick McCabe via eli)
 
+    HADOOP-8485. Don't hardcode "Apache Hadoop 0.23" in the docs. (eli)
+
+    HADOOP-8488. test-patch.sh gives +1 even if the native build fails.
+    (Colin Patrick McCabe via eli)
+
+    HADOOP-8507. Avoid OOM while deserializing DelegationTokenIdentifer.
+    (Colin Patrick McCabe via eli)
+
+    HADOOP-8433. Don't set HADOOP_LOG_DIR in hadoop-env.sh.
+    (Brahma Reddy Battula via eli)
+
+    HADOOP-8509. JarFinder duplicate entry: META-INF/MANIFEST.MF exception (tucu)
+
+    HADOOP-8512. AuthenticatedURL should reset the Token when the server returns 
+    other than OK on authentication (tucu)
+
+  BREAKDOWN OF HDFS-3042 SUBTASKS
+
+    HADOOP-8220. ZKFailoverController doesn't handle failure to become active
+    correctly (todd)
+
+    HADOOP-8228. Auto HA: Refactor tests and add stress tests. (todd)
+    
+    HADOOP-8215. Security support for ZK Failover controller (todd)
+    
+    HADOOP-8245. Fix flakiness in TestZKFailoverController (todd)
+    
+    HADOOP-8257. TestZKFailoverControllerStress occasionally fails with Mockito
+    error (todd)
+    
+    HADOOP-8260. Replace ClientBaseWithFixes with our own modified copy of the
+    class (todd)
+    
+    HADOOP-8246. Auto-HA: automatically scope znode by nameservice ID (todd)
+    
+    HADOOP-8247. Add a config to enable auto-HA, which disables manual
+    FailoverController (todd)
+    
+    HADOOP-8306. ZKFC: improve error message when ZK is not running. (todd)
+    
+    HADOOP-8279. Allow manual failover to be invoked when auto-failover is
+    enabled. (todd)
+    
+    HADOOP-8276. Auto-HA: add config for java options to pass to zkfc daemon
+    (todd via eli)
+    
+    HADOOP-8405. ZKFC tests leak ZK instances. (todd)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES
@@ -696,6 +731,12 @@ Release 0.23.3 - UNRELEASED
 
     HADOOP-8373. Port RPC.getServerAddress to 0.23 (Daryn Sharp via bobby)
 
+    HADOOP-8495. Update Netty to avoid leaking file descriptors during shuffle
+    (Jason Lowe via tgraves)
+
+    HADOOP-8129. ViewFileSystemTestSetup setupForViewFileSystem is erring
+    (Ahmed Radwan and Ravi Prakash via bobby)
+
 Release 0.23.2 - UNRELEASED 
 
   INCOMPATIBLE CHANGES

+ 18 - 85
hadoop-common-project/hadoop-common/pom.xml

@@ -536,31 +536,10 @@
         <snappy.prefix>/usr/local</snappy.prefix>
         <snappy.lib>${snappy.prefix}/lib</snappy.lib>
         <snappy.include>${snappy.prefix}/include</snappy.include>
+        <runas.home></runas.home>
       </properties>
       <build>
         <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>compile</id>
-                <phase>compile</phase>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-                <configuration>
-                  <target>
-                    <mkdir dir="${project.build.directory}/native/javah"/>
-                    <copy toDir="${project.build.directory}/native">
-                      <fileset dir="${basedir}/src/main/native"/>
-                    </copy>
-                    <mkdir dir="${project.build.directory}/native/m4"/>
-                  </target>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
           <plugin>
             <groupId>org.codehaus.mojo</groupId>
             <artifactId>native-maven-plugin</artifactId>
@@ -590,73 +569,27 @@
             </executions>
           </plugin>
           <plugin>
-            <groupId>org.codehaus.mojo</groupId>
-            <artifactId>make-maven-plugin</artifactId>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
             <executions>
               <execution>
-                <id>compile</id>
+                <id>make</id>
                 <phase>compile</phase>
-                <goals>
-                  <goal>autoreconf</goal>
-                  <goal>configure</goal>
-                  <goal>make-install</goal>
-                </goals>
+                <goals><goal>run</goal></goals>
+                <configuration>
+                  <target>
+                    <exec executable="cmake" dir="${project.build.directory}/native" failonerror="true">
+                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model}"/>
+                      <env key="CFLAGS" value="-I${snappy.include}"/>
+                      <env key="LDFLAGS" value="-L${snappy.lib}"/>
+                    </exec>
+                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
+                      <arg line="VERBOSE=1"/>
+                    </exec>
+                  </target>
+                </configuration>
               </execution>
             </executions>
-            <configuration>
-              <!-- autoreconf settings -->
-              <workDir>${project.build.directory}/native</workDir>
-              <arguments>
-                <argument>-i</argument>
-                <argument>-f</argument>
-              </arguments>
-
-              <!-- configure settings -->
-              <configureEnvironment>
-                <property>
-                  <name>OS_NAME</name>
-                  <value>${os.name}</value>
-                </property>
-                <property>
-                  <name>OS_ARCH</name>
-                  <value>${os.arch}</value>
-                </property>
-                <property>
-                  <name>JVM_DATA_MODEL</name>
-                  <value>${sun.arch.data.model}</value>
-                </property>
-              </configureEnvironment>
-              <configureOptions>
-                <configureOption>CPPFLAGS=-I${snappy.include}</configureOption>
-                <configureOption>LDFLAGS=-L${snappy.lib}</configureOption>
-              </configureOptions>
-              <configureWorkDir>${project.build.directory}/native</configureWorkDir>
-              <prefix>/usr/local</prefix>
-
-              <!-- make settings -->
-              <installEnvironment>
-                <property>
-                  <name>OS_NAME</name>
-                  <value>${os.name}</value>
-                </property>
-                <property>
-                  <name>OS_ARCH</name>
-                  <value>${os.arch}</value>
-                </property>
-                <property>
-                  <name>JVM_DATA_MODEL</name>
-                  <value>${sun.arch.data.model}</value>
-                </property>
-                <property>
-                  <name>HADOOP_NATIVE_SRCDIR</name>
-                  <value>${project.build.directory}/native</value>
-                </property>
-              </installEnvironment>
-
-              <!-- configure & make settings -->
-              <destDir>${project.build.directory}/native/target</destDir>
-
-            </configuration>
           </plugin>
         </plugins>
       </build>
@@ -700,7 +633,7 @@
             <artifactId>maven-antrun-plugin</artifactId>
             <executions>
               <execution>
-                <id>compile</id>
+                <id>kdc</id>
                 <phase>compile</phase>
                 <goals>
                   <goal>run</goal>

+ 126 - 0
hadoop-common-project/hadoop-common/src/CMakeLists.txt

@@ -0,0 +1,126 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+
+# Default to release builds
+set(CMAKE_BUILD_TYPE, Release)
+
+# If JVM_ARCH_DATA_MODEL is 32, compile all binaries as 32-bit.
+# This variable is set by maven.
+if (JVM_ARCH_DATA_MODEL EQUAL 32)
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32")
+    set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -m32")
+    if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
+        set(CMAKE_SYSTEM_PROCESSOR "i686")
+    endif ()
+endif (JVM_ARCH_DATA_MODEL EQUAL 32)
+
+# Compile a library with both shared and static variants
+function(add_dual_library LIBNAME)
+    add_library(${LIBNAME} SHARED ${ARGN})
+    add_library(${LIBNAME}_static STATIC ${ARGN})
+    set_target_properties(${LIBNAME}_static PROPERTIES OUTPUT_NAME ${LIBNAME})
+endfunction(add_dual_library)
+
+# Link both a static and a dynamic target against some libraries
+function(target_link_dual_libraries LIBNAME)
+    target_link_libraries(${LIBNAME} ${ARGN})
+    target_link_libraries(${LIBNAME}_static ${ARGN})
+endfunction(target_link_dual_libraries)
+
+function(output_directory TGT DIR)
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+endfunction(output_directory TGT DIR)
+
+function(dual_output_directory TGT DIR)
+    output_directory(${TGT} "${DIR}")
+    output_directory(${TGT}_static "${DIR}")
+endfunction(dual_output_directory TGT DIR)
+
+if (NOT GENERATED_JAVAH)
+    # Must identify where the generated headers have been placed
+    MESSAGE(FATAL_ERROR "You must set the cmake variable GENERATED_JAVAH")
+endif (NOT GENERATED_JAVAH)
+find_package(JNI REQUIRED)
+find_package(ZLIB REQUIRED)
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_FILE_OFFSET_BITS=64")
+set(D main/native/src/org/apache/hadoop)
+
+GET_FILENAME_COMPONENT(HADOOP_ZLIB_LIBRARY ${ZLIB_LIBRARIES} NAME)
+
+INCLUDE(CheckFunctionExists)
+INCLUDE(CheckCSourceCompiles)
+CHECK_FUNCTION_EXISTS(sync_file_range HAVE_SYNC_FILE_RANGE)
+CHECK_FUNCTION_EXISTS(posix_fadvise HAVE_POSIX_FADVISE)
+
+find_library(SNAPPY_LIBRARY NAMES snappy PATHS)
+find_path(SNAPPY_INCLUDE_DIR NAMES snappy.h PATHS)
+if (SNAPPY_LIBRARY)
+    GET_FILENAME_COMPONENT(HADOOP_SNAPPY_LIBRARY ${SNAPPY_LIBRARY} NAME)
+    set(SNAPPY_SOURCE_FILES
+        "${D}/io/compress/snappy/SnappyCompressor.c"
+        "${D}/io/compress/snappy/SnappyDecompressor.c")
+else (${SNAPPY_LIBRARY})
+    set(SNAPPY_INCLUDE_DIR "")
+    set(SNAPPY_SOURCE_FILES "")
+endif (SNAPPY_LIBRARY)
+
+include_directories(
+    ${GENERATED_JAVAH}
+    main/native/src
+    ${CMAKE_CURRENT_SOURCE_DIR}
+    ${CMAKE_CURRENT_SOURCE_DIR}/src
+    ${CMAKE_BINARY_DIR}
+    ${JNI_INCLUDE_DIRS}
+    ${ZLIB_INCLUDE_DIRS}
+    ${SNAPPY_INCLUDE_DIR}
+)
+CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
+
+add_dual_library(hadoop
+    ${D}/io/compress/lz4/Lz4Compressor.c
+    ${D}/io/compress/lz4/Lz4Decompressor.c
+    ${D}/io/compress/lz4/lz4.c
+    ${SNAPPY_SOURCE_FILES}
+    ${D}/io/compress/zlib/ZlibCompressor.c
+    ${D}/io/compress/zlib/ZlibDecompressor.c
+    ${D}/io/nativeio/NativeIO.c
+    ${D}/io/nativeio/errno_enum.c
+    ${D}/io/nativeio/file_descriptor.c
+    ${D}/security/JniBasedUnixGroupsMapping.c
+    ${D}/security/JniBasedUnixGroupsNetgroupMapping.c
+    ${D}/security/getGroup.c
+    ${D}/util/NativeCrc32.c
+    ${D}/util/bulk_crc32.c
+)
+target_link_dual_libraries(hadoop
+    dl
+    ${JAVA_JVM_LIBRARY}
+)
+SET(LIBHADOOP_VERSION "1.0.0")
+SET_TARGET_PROPERTIES(hadoop PROPERTIES
+    SOVERSION ${LIBHADOOP_VERSION})
+dual_output_directory(hadoop target/usr/local/lib)

+ 10 - 0
hadoop-common-project/hadoop-common/src/config.h.cmake

@@ -0,0 +1,10 @@
+#ifndef CONFIG_H
+#define CONFIG_H
+
+#cmakedefine HADOOP_ZLIB_LIBRARY "@HADOOP_ZLIB_LIBRARY@"
+#cmakedefine HADOOP_RUNAS_HOME "@HADOOP_RUNAS_HOME@"
+#cmakedefine HADOOP_SNAPPY_LIBRARY "@HADOOP_SNAPPY_LIBRARY@"
+#cmakedefine HAVE_SYNC_FILE_RANGE
+#cmakedefine HAVE_POSIX_FADVISE
+
+#endif

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh

@@ -61,7 +61,7 @@ export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"
 export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
 
 # Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
 
 # Where log files are stored in the secure data environment.
 export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}

+ 32 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -1070,6 +1070,38 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     }
   }
 
+  /**
+   * Gets the absolute path to the resource object (file, URL, etc.) for a given
+   * property name.
+   *
+   * @param name - The property name to get the source of.
+   * @return null - If the property or its source wasn't found or if the property
+   * was defined in code (i.e. in a Configuration instance, not from a physical
+   * resource). Otherwise, returns the absolute path of the resource that loaded
+   * the property name, as a String.
+   */
+  @InterfaceStability.Unstable
+  public synchronized String getPropertySource(String name) {
+    if (properties == null) {
+      // If properties is null, it means a resource was newly added
+      // but the props were cleared so as to load it upon future
+      // requests. So let's force a load by asking for the properties list.
+      getProps();
+    }
+    // Return null right away if our properties still
+    // haven't loaded or the resource mapping isn't defined
+    if (properties == null || updatingResource == null) {
+      return null;
+    } else {
+      String source = updatingResource.get(name);
+      if (source == null || source.equals(UNKNOWN_RESOURCE)) {
+        return null;
+      } else {
+        return source;
+      }
+    }
+  }
+
   /**
    * A class that represents a set of positive integer ranges. It parses 
    * strings of the form: "2-3,5,7-" where ranges are separated by comma and 

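A minimal usage sketch for the new getPropertySource(String) API above, assuming the stock core-default.xml/core-site.xml resources are on the classpath; the key names and printed values are illustrative only.

    import org.apache.hadoop.conf.Configuration;

    public class PropertySourceExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();   // loads core-default.xml, core-site.xml
        conf.set("my.inline.key", "some value");    // defined in code, not from a resource

        // Prints the absolute path of the resource that defined the key,
        // e.g. the jar-embedded core-default.xml entry.
        System.out.println(conf.getPropertySource("io.file.buffer.size"));

        // Prints null: properties set programmatically have no resource source.
        System.out.println(conf.getPropertySource("my.inline.key"));
      }
    }
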
+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -63,7 +63,9 @@ public class CommonConfigurationKeysPublic {
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY =
     "net.topology.node.switch.mapping.impl";
-  
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String  NET_TOPOLOGY_IMPL_KEY =
+    "net.topology.impl";
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY =
     "net.topology.table.file.name";

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenRenewer.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java

@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdfs.security.token.delegation;
+package org.apache.hadoop.fs;
 
 import java.io.IOException;
 import java.lang.ref.WeakReference;
@@ -25,7 +25,6 @@ import java.util.concurrent.Delayed;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 
@@ -161,4 +160,4 @@ public class DelegationTokenRenewer<T extends FileSystem & DelegationTokenRenewe
       }
     }
   }
-}
+}

+ 8 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java

@@ -254,7 +254,7 @@ public class FileStatus implements Writable, Comparable {
   // Writable
   //////////////////////////////////////////////////
   public void write(DataOutput out) throws IOException {
-    Text.writeString(out, getPath().toString(), Text.ONE_MEGABYTE);
+    Text.writeString(out, getPath().toString(), Text.DEFAULT_MAX_LEN);
     out.writeLong(getLen());
     out.writeBoolean(isDirectory());
     out.writeShort(getReplication());
@@ -262,16 +262,16 @@ public class FileStatus implements Writable, Comparable {
     out.writeLong(getModificationTime());
     out.writeLong(getAccessTime());
     getPermission().write(out);
-    Text.writeString(out, getOwner(), Text.ONE_MEGABYTE);
-    Text.writeString(out, getGroup(), Text.ONE_MEGABYTE);
+    Text.writeString(out, getOwner(), Text.DEFAULT_MAX_LEN);
+    Text.writeString(out, getGroup(), Text.DEFAULT_MAX_LEN);
     out.writeBoolean(isSymlink());
     if (isSymlink()) {
-      Text.writeString(out, getSymlink().toString(), Text.ONE_MEGABYTE);
+      Text.writeString(out, getSymlink().toString(), Text.DEFAULT_MAX_LEN);
     }
   }
 
   public void readFields(DataInput in) throws IOException {
-    String strPath = Text.readString(in, Text.ONE_MEGABYTE);
+    String strPath = Text.readString(in, Text.DEFAULT_MAX_LEN);
     this.path = new Path(strPath);
     this.length = in.readLong();
     this.isdir = in.readBoolean();
@@ -280,10 +280,10 @@ public class FileStatus implements Writable, Comparable {
     modification_time = in.readLong();
     access_time = in.readLong();
     permission.readFields(in);
-    owner = Text.readString(in, Text.ONE_MEGABYTE);
-    group = Text.readString(in, Text.ONE_MEGABYTE);
+    owner = Text.readString(in, Text.DEFAULT_MAX_LEN);
+    group = Text.readString(in, Text.DEFAULT_MAX_LEN);
     if (in.readBoolean()) {
-      this.symlink = new Path(Text.readString(in, Text.ONE_MEGABYTE));
+      this.symlink = new Path(Text.readString(in, Text.DEFAULT_MAX_LEN));
     } else {
       this.symlink = null;
     }

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java

@@ -84,8 +84,8 @@ public class PermissionStatus implements Writable {
 
   /** {@inheritDoc} */
   public void readFields(DataInput in) throws IOException {
-    username = Text.readString(in, Text.ONE_MEGABYTE);
-    groupname = Text.readString(in, Text.ONE_MEGABYTE);
+    username = Text.readString(in, Text.DEFAULT_MAX_LEN);
+    groupname = Text.readString(in, Text.DEFAULT_MAX_LEN);
     permission = FsPermission.read(in);
   }
 
@@ -110,8 +110,8 @@ public class PermissionStatus implements Writable {
                            String username, 
                            String groupname,
                            FsPermission permission) throws IOException {
-    Text.writeString(out, username, Text.ONE_MEGABYTE);
-    Text.writeString(out, groupname, Text.ONE_MEGABYTE);
+    Text.writeString(out, username, Text.DEFAULT_MAX_LEN);
+    Text.writeString(out, groupname, Text.DEFAULT_MAX_LEN);
     permission.write(out);
   }
 

+ 20 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -52,7 +52,9 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
 import org.apache.hadoop.metrics.MetricsServlet;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.mortbay.io.Buffer;
@@ -606,6 +608,24 @@ public class HttpServer implements FilterContainer {
     sslListener.setNeedClientAuth(needCertsAuth);
     webServer.addConnector(sslListener);
   }
+  
+  protected void initSpnego(Configuration conf,
+      String usernameConfKey, String keytabConfKey) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    String principalInConf = conf.get(usernameConfKey);
+    if (principalInConf != null && !principalInConf.isEmpty()) {
+      params.put("kerberos.principal",
+                 SecurityUtil.getServerPrincipal(principalInConf, listener.getHost()));
+    }
+    String httpKeytab = conf.get(keytabConfKey);
+    if (httpKeytab != null && !httpKeytab.isEmpty()) {
+      params.put("kerberos.keytab", httpKeytab);
+    }
+    params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
+  
+    defineFilter(webAppContext, SPNEGO_FILTER,
+                 AuthenticationFilter.class.getName(), params, null);
+  }
 
   /**
    * Start the server. Does not wait for the server to start.

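Since initSpnego(..) is protected, a daemon enables SPNEGO from its own HttpServer subclass. A hedged sketch, assuming the five-argument HttpServer constructor of this era; the configuration key names are hypothetical examples, not fixed by this patch.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer;

    public class SecureWebServer extends HttpServer {
      public SecureWebServer(Configuration conf) throws IOException {
        super("demo", "0.0.0.0", 50070, false, conf);
        // Defines the SPNEGO AuthenticationFilter over the web app.
        initSpnego(conf,
            "dfs.web.authentication.kerberos.principal",  // e.g. HTTP/_HOST@EXAMPLE.COM
            "dfs.web.authentication.kerberos.keytab");    // e.g. /etc/security/http.keytab
      }
    }
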
+ 33 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java

@@ -20,6 +20,9 @@ package org.apache.hadoop.io;
 
 import java.io.*;
 import java.net.Socket;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.WritableByteChannel;
 
 import org.apache.commons.logging.Log;
 
@@ -245,4 +248,34 @@ public class IOUtils {
     public void write(int b) throws IOException {
     }
   }  
+  
+  /**
+   * Write a ByteBuffer to a WritableByteChannel, handling short writes.
+   * 
+   * @param bc               The WritableByteChannel to write to
+   * @param buf              The input buffer
+   * @throws IOException     On I/O error
+   */
+  public static void writeFully(WritableByteChannel bc, ByteBuffer buf)
+      throws IOException {
+    do {
+      bc.write(buf);
+    } while (buf.remaining() > 0);
+  }
+
+  /**
+   * Write a ByteBuffer to a FileChannel at a given offset, 
+   * handling short writes.
+   * 
+   * @param fc               The FileChannel to write to
+   * @param buf              The input buffer
+   * @param offset           The offset in the file to start writing at
+   * @throws IOException     On I/O error
+   */
+  public static void writeFully(FileChannel fc, ByteBuffer buf,
+      long offset) throws IOException {
+    do {
+      offset += fc.write(buf, offset);
+    } while (buf.remaining() > 0);
+  }
 }

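A small sketch of the new channel helpers; the file name is arbitrary. Unlike a single fc.write(..) call, the positional variant loops until the buffer is drained, so a short write cannot silently truncate the data.

    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    import org.apache.hadoop.io.IOUtils;

    public class WriteFullyExample {
      public static void main(String[] args) throws IOException {
        RandomAccessFile raf = new RandomAccessFile("demo.bin", "rw");
        try {
          FileChannel fc = raf.getChannel();
          ByteBuffer buf = ByteBuffer.wrap("hello, world".getBytes("UTF-8"));
          // Calls fc.write(buf, offset) repeatedly, advancing the offset,
          // until buf.remaining() == 0; the channel position is untouched.
          IOUtils.writeFully(fc, buf, 1024L);
        } finally {
          raf.close();
        }
      }
    }
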
+ 28 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java

@@ -287,6 +287,20 @@ public class Text extends BinaryComparable
     in.readFully(bytes, 0, newLength);
     length = newLength;
   }
+  
+  public void readFields(DataInput in, int maxLength) throws IOException {
+    int newLength = WritableUtils.readVInt(in);
+    if (newLength < 0) {
+      throw new IOException("tried to deserialize " + newLength +
+          " bytes of data!  newLength must be non-negative.");
+    } else if (newLength >= maxLength) {
+      throw new IOException("tried to deserialize " + newLength +
+          " bytes of data, but maxLength = " + maxLength);
+    }
+    setCapacity(newLength, false);
+    in.readFully(bytes, 0, newLength);
+    length = newLength;
+  }
 
   /** Skips over one Text in the input. */
   public static void skip(DataInput in) throws IOException {
@@ -304,6 +318,16 @@ public class Text extends BinaryComparable
     out.write(bytes, 0, length);
   }
 
+  public void write(DataOutput out, int maxLength) throws IOException {
+    if (length > maxLength) {
+      throw new IOException("data was too long to write!  Expected " +
+          "less than or equal to " + maxLength + " bytes, but got " +
+          length + " bytes.");
+    }
+    WritableUtils.writeVInt(out, length);
+    out.write(bytes, 0, length);
+  }
+
   /** Returns true iff <code>o</code> is a Text with the same contents.  */
   public boolean equals(Object o) {
     if (o instanceof Text)
@@ -417,7 +441,7 @@ public class Text extends BinaryComparable
     return bytes;
   }
 
-  static final public int ONE_MEGABYTE = 1024 * 1024;
+  static final public int DEFAULT_MAX_LEN = 1024 * 1024;
 
   /** Read a UTF8 encoded string from in
    */
@@ -432,7 +456,7 @@ public class Text extends BinaryComparable
    */
   public static String readString(DataInput in, int maxLength)
       throws IOException {
-    int length = WritableUtils.readVIntInRange(in, 0, maxLength - 1);
+    int length = WritableUtils.readVIntInRange(in, 0, maxLength);
     byte [] bytes = new byte[length];
     in.readFully(bytes, 0, length);
     return decode(bytes);
@@ -454,9 +478,9 @@ public class Text extends BinaryComparable
       throws IOException {
     ByteBuffer bytes = encode(s);
     int length = bytes.limit();
-    if (length >= maxLength) {
+    if (length > maxLength) {
       throw new IOException("string was too long to write!  Expected " +
-          "less than " + maxLength + " bytes, but got " +
+          "less than or equal to " + maxLength + " bytes, but got " +
           length + " bytes.");
     }
     WritableUtils.writeVInt(out, length);

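A round-trip sketch of the renamed constant and the relaxed bounds: with the >= checks changed to >, a string of exactly maxLength bytes is now legal on both the write and the read path.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.io.Text;

    public class BoundedTextExample {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(baos);

        String s = "abcd";                // exactly 4 bytes of UTF-8
        Text.writeString(out, s, 4);      // length == maxLength: accepted after this patch

        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(baos.toByteArray()));
        System.out.println(Text.readString(in, 4));  // prints "abcd"

        // The shared 1 MB cap now used by FileStatus and PermissionStatus.
        System.out.println(Text.DEFAULT_MAX_LEN);    // prints 1048576
      }
    }
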
+ 8 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java

@@ -33,7 +33,7 @@ import org.apache.hadoop.ipc.RpcInvocationHandler;
 
 class RetryInvocationHandler implements RpcInvocationHandler {
   public static final Log LOG = LogFactory.getLog(RetryInvocationHandler.class);
-  private FailoverProxyProvider proxyProvider;
+  private final FailoverProxyProvider proxyProvider;
 
   /**
    * The number of times the associated proxyProvider has ever been failed over.
@@ -41,26 +41,25 @@ class RetryInvocationHandler implements RpcInvocationHandler {
   private long proxyProviderFailoverCount = 0;
   private volatile boolean hasMadeASuccessfulCall = false;
   
-  private RetryPolicy defaultPolicy;
-  private Map<String,RetryPolicy> methodNameToPolicyMap;
+  private final RetryPolicy defaultPolicy;
+  private final Map<String,RetryPolicy> methodNameToPolicyMap;
   private Object currentProxy;
   
   public RetryInvocationHandler(FailoverProxyProvider proxyProvider,
       RetryPolicy retryPolicy) {
-    this.proxyProvider = proxyProvider;
-    this.defaultPolicy = retryPolicy;
-    this.methodNameToPolicyMap = Collections.emptyMap();
-    this.currentProxy = proxyProvider.getProxy();
+    this(proxyProvider, retryPolicy, Collections.<String, RetryPolicy>emptyMap());
   }
-  
+
   public RetryInvocationHandler(FailoverProxyProvider proxyProvider,
+      RetryPolicy defaultPolicy,
       Map<String, RetryPolicy> methodNameToPolicyMap) {
     this.proxyProvider = proxyProvider;
-    this.defaultPolicy = RetryPolicies.TRY_ONCE_THEN_FAIL;
+    this.defaultPolicy = defaultPolicy;
     this.methodNameToPolicyMap = methodNameToPolicyMap;
     this.currentProxy = proxyProvider.getProxy();
   }
 
+  @Override
   public Object invoke(Object proxy, Method method, Object[] args)
     throws Throwable {
     RetryPolicy policy = methodNameToPolicyMap.get(method.getName());

+ 230 - 12
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -22,10 +22,13 @@ import java.net.ConnectException;
 import java.net.NoRouteToHostException;
 import java.net.SocketException;
 import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Map.Entry;
+import java.util.Random;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
@@ -33,8 +36,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * <p>
  * A collection of useful implementations of {@link RetryPolicy}.
@@ -44,7 +45,12 @@ public class RetryPolicies {
   
   public static final Log LOG = LogFactory.getLog(RetryPolicies.class);
   
-  private static final Random RAND = new Random();
+  private static ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
+    @Override
+    protected Random initialValue() {
+      return new Random();
+    }
+  };
   
   /**
    * <p>
@@ -157,17 +163,35 @@ public class RetryPolicies {
     }
   }
   
+  /**
+   * Retry up to maxRetries.
+   * The actual sleep time of the n-th retry is f(n, sleepTime),
+   * where f is a function provided by the subclass implementation.
+   *
+   * Objects of the subclasses should be immutable;
+   * otherwise, the subclass must override hashCode(), equals(..) and toString().
+   */
   static abstract class RetryLimited implements RetryPolicy {
-    int maxRetries;
-    long sleepTime;
-    TimeUnit timeUnit;
+    final int maxRetries;
+    final long sleepTime;
+    final TimeUnit timeUnit;
     
-    public RetryLimited(int maxRetries, long sleepTime, TimeUnit timeUnit) {
+    private String myString;
+
+    RetryLimited(int maxRetries, long sleepTime, TimeUnit timeUnit) {
+      if (maxRetries < 0) {
+        throw new IllegalArgumentException("maxRetries = " + maxRetries+" < 0");
+      }
+      if (sleepTime < 0) {
+        throw new IllegalArgumentException("sleepTime = " + sleepTime + " < 0");
+      }
+
       this.maxRetries = maxRetries;
       this.sleepTime = sleepTime;
       this.timeUnit = timeUnit;
     }
 
+    @Override
     public RetryAction shouldRetry(Exception e, int retries, int failovers,
         boolean isMethodIdempotent) throws Exception {
       if (retries >= maxRetries) {
@@ -178,6 +202,30 @@ public class RetryPolicies {
     }
     
     protected abstract long calculateSleepTime(int retries);
+    
+    @Override
+    public int hashCode() {
+      return toString().hashCode();
+    }
+    
+    @Override
+    public boolean equals(final Object that) {
+      if (this == that) {
+        return true;
+      } else if (that == null || this.getClass() != that.getClass()) {
+        return false;
+      }
+      return this.toString().equals(that.toString());
+    }
+
+    @Override
+    public String toString() {
+      if (myString == null) {
+        myString = getClass().getSimpleName() + "(maxRetries=" + maxRetries
+            + ", sleepTime=" + sleepTime + " " + timeUnit + ")";
+      }
+      return myString;
+    }
   }
   
   static class RetryUpToMaximumCountWithFixedSleep extends RetryLimited {
@@ -208,6 +256,169 @@ public class RetryPolicies {
     }
   }
   
+  /**
+   * Given pairs of number of retries and sleep time (n0, t0), (n1, t1), ...,
+   * the first n0 retries sleep t0 milliseconds on average,
+   * the following n1 retries sleep t1 milliseconds on average, and so on.
+   * 
+   * For each sleep, the actual sleep time is uniformly distributed
+   * over the closed interval [0.5t, 1.5t], where t is the specified sleep time.
+   *
+   * The objects of this class are immutable.
+   */
+  public static class MultipleLinearRandomRetry implements RetryPolicy {
+    /** Pairs of numRetries and sleepSeconds */
+    public static class Pair {
+      final int numRetries;
+      final int sleepMillis;
+      
+      public Pair(final int numRetries, final int sleepMillis) {
+        if (numRetries < 0) {
+          throw new IllegalArgumentException("numRetries = " + numRetries+" < 0");
+        }
+        if (sleepMillis < 0) {
+          throw new IllegalArgumentException("sleepMillis = " + sleepMillis + " < 0");
+        }
+
+        this.numRetries = numRetries;
+        this.sleepMillis = sleepMillis;
+      }
+      
+      @Override
+      public String toString() {
+        return numRetries + "x" + sleepMillis + "ms";
+      }
+    }
+
+    private final List<Pair> pairs;
+    private String myString;
+
+    public MultipleLinearRandomRetry(List<Pair> pairs) {
+      if (pairs == null || pairs.isEmpty()) {
+        throw new IllegalArgumentException("pairs must be neither null nor empty.");
+      }
+      this.pairs = Collections.unmodifiableList(pairs);
+    }
+
+    @Override
+    public RetryAction shouldRetry(Exception e, int curRetry, int failovers,
+        boolean isMethodIdempotent) throws Exception {
+      final Pair p = searchPair(curRetry);
+      if (p == null) {
+        //no more retries.
+        return RetryAction.FAIL;
+      }
+
+      //calculate sleep time and return.
+      final double ratio = RANDOM.get().nextDouble() + 0.5;//0.5 <= ratio <=1.5
+      final long sleepTime = Math.round(p.sleepMillis * ratio);
+      return new RetryAction(RetryAction.RetryDecision.RETRY, sleepTime);
+    }
+
+    /**
+     * Given the current retry count, search for the corresponding pair.
+     * @return the corresponding pair,
+     *   or null if the current retry count exceeds the maximum number of retries.
+     */
+    private Pair searchPair(int curRetry) {
+      int i = 0;
+      for(; i < pairs.size() && curRetry > pairs.get(i).numRetries; i++) {
+        curRetry -= pairs.get(i).numRetries;
+      }
+      return i == pairs.size()? null: pairs.get(i);
+    }
+    
+    @Override
+    public int hashCode() {
+      return toString().hashCode();
+    }
+    
+    @Override
+    public boolean equals(final Object that) {
+      if (this == that) {
+        return true;
+      } else if (that == null || this.getClass() != that.getClass()) {
+        return false;
+      }
+      return this.toString().equals(that.toString());
+    }
+
+    @Override
+    public String toString() {
+      if (myString == null) {
+        myString = getClass().getSimpleName() + pairs;
+      }
+      return myString;
+    }
+
+    /**
+     * Parse the given string as a MultipleLinearRandomRetry object.
+     * The format of the string is "t_1, n_1, t_2, n_2, ...",
+     * where t_i and n_i are the i-th pair of sleep time and number of retries.
+     * Note that whitespace in the string is ignored.
+     *
+     * @return the parsed object, or null if the parsing fails.
+     */
+    public static MultipleLinearRandomRetry parseCommaSeparatedString(String s) {
+      final String[] elements = s.split(",");
+      if (elements.length == 0) {
+        LOG.warn("Illegal value: there is no element in \"" + s + "\".");
+        return null;
+      }
+      if (elements.length % 2 != 0) {
+        LOG.warn("Illegal value: the number of elements in \"" + s + "\" is "
+            + elements.length + " but an even number of elements is expected.");
+        return null;
+      }
+
+      final List<RetryPolicies.MultipleLinearRandomRetry.Pair> pairs
+          = new ArrayList<RetryPolicies.MultipleLinearRandomRetry.Pair>();
+   
+      for(int i = 0; i < elements.length; ) {
+        //parse the i-th sleep-time
+        final int sleep = parsePositiveInt(elements, i++, s);
+        if (sleep == -1) {
+          return null; //parse fails
+        }
+
+        //parse the i-th number-of-retries
+        final int retries = parsePositiveInt(elements, i++, s);
+        if (retries == -1) {
+          return null; //parse fails
+        }
+
+        pairs.add(new RetryPolicies.MultipleLinearRandomRetry.Pair(retries, sleep));
+      }
+      return new RetryPolicies.MultipleLinearRandomRetry(pairs);
+    }
+
+    /**
+     * Parse the i-th element as an integer.
+     * @return -1 if the parsing fails or the parsed value <= 0;
+     *   otherwise, return the parsed value.
+     */
+    private static int parsePositiveInt(final String[] elements,
+        final int i, final String originalString) {
+      final String s = elements[i].trim();
+      final int n;
+      try {
+        n = Integer.parseInt(s);
+      } catch(NumberFormatException nfe) {
+        LOG.warn("Failed to parse \"" + s + "\", which is the index " + i
+            + " element in \"" + originalString + "\"", nfe);
+        return -1;
+      }
+
+      if (n <= 0) {
+        LOG.warn("The value " + n + " <= 0: it is parsed from the string \""
+            + s + "\" which is the index " + i + " element in \""
+            + originalString + "\"");
+        return -1;
+      }
+      return n;
+    }
+  }
+
   static class ExceptionDependentRetry implements RetryPolicy {
 
     RetryPolicy defaultPolicy;
@@ -265,6 +476,14 @@ public class RetryPolicies {
     public ExponentialBackoffRetry(
         int maxRetries, long sleepTime, TimeUnit timeUnit) {
       super(maxRetries, sleepTime, timeUnit);
+
+      if (maxRetries < 0) {
+        throw new IllegalArgumentException("maxRetries = " + maxRetries + " < 0");
+      } else if (maxRetries >= Long.SIZE - 1) {
+        //calculateSleepTime may overflow. 
+        throw new IllegalArgumentException("maxRetries = " + maxRetries
+            + " >= " + (Long.SIZE - 1));
+      }
     }
     
     @Override
@@ -353,11 +572,10 @@ public class RetryPolicies {
    * @param cap value at which to cap the base sleep time
    * @return an amount of time to sleep
    */
-  @VisibleForTesting
-  public static long calculateExponentialTime(long time, int retries,
+  private static long calculateExponentialTime(long time, int retries,
       long cap) {
-    long baseTime = Math.min(time * ((long)1 << retries), cap);
-    return (long) (baseTime * (RAND.nextFloat() + 0.5));
+    long baseTime = Math.min(time * (1L << retries), cap);
+    return (long) (baseTime * (RANDOM.get().nextDouble() + 0.5));
   }
 
   private static long calculateExponentialTime(long time, int retries) {

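A sketch of the new schedule parser: per the format above, "500,6,10000,10" means six retries sleeping about 500 ms each, then ten retries sleeping about 10 s each, with every sleep drawn uniformly from [0.5t, 1.5t].

    import org.apache.hadoop.io.retry.RetryPolicies.MultipleLinearRandomRetry;

    public class MultipleLinearRandomRetryExample {
      public static void main(String[] args) {
        MultipleLinearRandomRetry policy =
            MultipleLinearRandomRetry.parseCommaSeparatedString("500,6,10000,10");
        // toString() reflects the parsed pairs:
        // MultipleLinearRandomRetry[6x500ms, 10x10000ms]
        System.out.println(policy);
      }
    }
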
+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java

@@ -60,6 +60,12 @@ public interface RetryPolicy {
       this.reason = reason;
     }
     
+    @Override
+    public String toString() {
+      return getClass().getSimpleName() + "(action=" + action
+          + ", delayMillis=" + delayMillis + ", reason=" + reason + ")";
+    }
+    
     public enum RetryDecision {
       FAIL,
       RETRY,

+ 7 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java

@@ -75,9 +75,10 @@ public class RetryProxy {
    */
   public static Object create(Class<?> iface, Object implementation,
                               Map<String,RetryPolicy> methodNameToPolicyMap) {
-    return RetryProxy.create(iface,
+    return create(iface,
         new DefaultFailoverProxyProvider(iface, implementation),
-        methodNameToPolicyMap);
+        methodNameToPolicyMap,
+        RetryPolicies.TRY_ONCE_THEN_FAIL);
   }
 
   /**
@@ -92,11 +93,13 @@ public class RetryProxy {
    * @return the retry proxy
    */
   public static Object create(Class<?> iface, FailoverProxyProvider proxyProvider,
-      Map<String,RetryPolicy> methodNameToPolicyMap) {
+      Map<String,RetryPolicy> methodNameToPolicyMap,
+      RetryPolicy defaultPolicy) {
     return Proxy.newProxyInstance(
         proxyProvider.getInterface().getClassLoader(),
         new Class<?>[] { iface },
-        new RetryInvocationHandler(proxyProvider, methodNameToPolicyMap)
+        new RetryInvocationHandler(proxyProvider, defaultPolicy,
+            methodNameToPolicyMap)
         );
   }
 }

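The interface and implementation below are hypothetical; the point is the new four-argument create(..), which lets callers choose the fallback policy instead of always getting TRY_ONCE_THEN_FAIL.

    import java.util.Collections;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;

    public class RetryProxyExample {
      interface Echo { String echo(String s); }       // illustrative interface
      static class EchoImpl implements Echo {
        public String echo(String s) { return s; }
      }

      public static void main(String[] args) {
        Map<String, RetryPolicy> perMethod = Collections.emptyMap();
        Echo proxy = (Echo) RetryProxy.create(Echo.class,
            new DefaultFailoverProxyProvider(Echo.class, new EchoImpl()),
            perMethod,
            RetryPolicies.retryUpToMaximumCountWithFixedSleep(3, 1, TimeUnit.SECONDS));
        System.out.println(proxy.echo("hello"));      // failures are retried per the policy
      }
    }
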
+ 83 - 39
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -18,47 +18,51 @@
 
 package org.apache.hadoop.ipc;
 
-import java.net.InetAddress;
-import java.net.Socket;
-import java.net.InetSocketAddress;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-import java.io.IOException;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
 import java.io.FilterInputStream;
+import java.io.IOException;
 import java.io.InputStream;
+import java.io.InterruptedIOException;
 import java.io.OutputStream;
-
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
 import java.security.PrivilegedExceptionAction;
 import java.util.Hashtable;
 import java.util.Iterator;
+import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
-import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.*;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
 import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadOperationProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.SaslRpcClient;
@@ -67,8 +71,8 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 
@@ -80,8 +84,8 @@ import org.apache.hadoop.util.ReflectionUtils;
  */
 public class Client {
   
-  public static final Log LOG =
-    LogFactory.getLog(Client.class);
+  public static final Log LOG = LogFactory.getLog(Client.class);
+
   private Hashtable<ConnectionId, Connection> connections =
     new Hashtable<ConnectionId, Connection>();
 
@@ -228,8 +232,7 @@ public class Client {
     private int rpcTimeout;
     private int maxIdleTime; //connections will be culled if it was idle for 
     //maxIdleTime msecs
-    private int maxRetries; //the max. no. of retries for socket connections
-    // the max. no. of retries for socket connections on time out exceptions
+    private final RetryPolicy connectionRetryPolicy;
     private int maxRetriesOnSocketTimeouts;
     private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
     private boolean doPing; //do we need to send ping message
@@ -253,7 +256,7 @@ public class Client {
       }
       this.rpcTimeout = remoteId.getRpcTimeout();
       this.maxIdleTime = remoteId.getMaxIdleTime();
-      this.maxRetries = remoteId.getMaxRetries();
+      this.connectionRetryPolicy = remoteId.connectionRetryPolicy;
       this.maxRetriesOnSocketTimeouts = remoteId.getMaxRetriesOnSocketTimeouts();
       this.tcpNoDelay = remoteId.getTcpNoDelay();
       this.doPing = remoteId.getDoPing();
@@ -488,7 +491,7 @@ public class Client {
           if (updateAddress()) {
             timeoutFailures = ioFailures = 0;
           }
-          handleConnectionFailure(ioFailures++, maxRetries, ie);
+          handleConnectionFailure(ioFailures++, ie);
         }
       }
     }
@@ -680,8 +683,36 @@ public class Client {
         Thread.sleep(1000);
       } catch (InterruptedException ignored) {}
       
-      LOG.info("Retrying connect to server: " + server + 
-          ". Already tried " + curRetries + " time(s).");
+      LOG.info("Retrying connect to server: " + server + ". Already tried "
+          + curRetries + " time(s); maxRetries=" + maxRetries);
+    }
+
+    private void handleConnectionFailure(int curRetries, IOException ioe
+        ) throws IOException {
+      closeConnection();
+
+      final RetryAction action;
+      try {
+        action = connectionRetryPolicy.shouldRetry(ioe, curRetries, 0, true);
+      } catch(Exception e) {
+        throw e instanceof IOException? (IOException)e: new IOException(e);
+      }
+      if (action.action == RetryAction.RetryDecision.FAIL) {
+        if (action.reason != null) {
+          LOG.warn("Failed to connect to server: " + server + ": "
+              + action.reason, ioe);
+        }
+        throw ioe;
+      }
+
+      try {
+        Thread.sleep(action.delayMillis);
+      } catch (InterruptedException e) {
+        throw (IOException)new InterruptedIOException("Interrupted: action="
+            + action + ", retry policy=" + connectionRetryPolicy).initCause(e);
+      }
+      LOG.info("Retrying connect to server: " + server + ". Already tried "
+          + curRetries + " time(s); retry policy is " + connectionRetryPolicy);
     }
 
     /**
@@ -849,6 +880,10 @@ public class Client {
       try {
         RpcResponseHeaderProto response = 
             RpcResponseHeaderProto.parseDelimitedFrom(in);
+        if (response == null) {
+          throw new IOException("Response is null.");
+        }
+
         int callId = response.getCallId();
         if (LOG.isDebugEnabled())
           LOG.debug(getName() + " got value #" + callId);
@@ -1287,7 +1322,7 @@ public class Client {
     private final String serverPrincipal;
     private final int maxIdleTime; //connections will be culled if it was idle for 
     //maxIdleTime msecs
-    private final int maxRetries; //the max. no. of retries for socket connections
+    private final RetryPolicy connectionRetryPolicy;
     // the max. no. of retries for socket connections on time out exceptions
     private final int maxRetriesOnSocketTimeouts;
     private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
@@ -1297,7 +1332,7 @@ public class Client {
     ConnectionId(InetSocketAddress address, Class<?> protocol, 
                  UserGroupInformation ticket, int rpcTimeout,
                  String serverPrincipal, int maxIdleTime, 
-                 int maxRetries, int maxRetriesOnSocketTimeouts,
+                 RetryPolicy connectionRetryPolicy, int maxRetriesOnSocketTimeouts,
                  boolean tcpNoDelay, boolean doPing, int pingInterval) {
       this.protocol = protocol;
       this.address = address;
@@ -1305,7 +1340,7 @@ public class Client {
       this.rpcTimeout = rpcTimeout;
       this.serverPrincipal = serverPrincipal;
       this.maxIdleTime = maxIdleTime;
-      this.maxRetries = maxRetries;
+      this.connectionRetryPolicy = connectionRetryPolicy;
       this.maxRetriesOnSocketTimeouts = maxRetriesOnSocketTimeouts;
       this.tcpNoDelay = tcpNoDelay;
       this.doPing = doPing;
@@ -1336,10 +1371,6 @@ public class Client {
       return maxIdleTime;
     }
     
-    int getMaxRetries() {
-      return maxRetries;
-    }
-    
     /** max connection retries on socket time outs */
     public int getMaxRetriesOnSocketTimeouts() {
       return maxRetriesOnSocketTimeouts;
@@ -1357,6 +1388,12 @@ public class Client {
       return pingInterval;
     }
     
+    static ConnectionId getConnectionId(InetSocketAddress addr,
+        Class<?> protocol, UserGroupInformation ticket, int rpcTimeout,
+        Configuration conf) throws IOException {
+      return getConnectionId(addr, protocol, ticket, rpcTimeout, null, conf);
+    }
+
     /**
      * Returns a ConnectionId object. 
      * @param addr Remote address for the connection.
@@ -1367,9 +1404,18 @@ public class Client {
      * @return A ConnectionId instance
      * @throws IOException
      */
-    public static ConnectionId getConnectionId(InetSocketAddress addr,
+    static ConnectionId getConnectionId(InetSocketAddress addr,
         Class<?> protocol, UserGroupInformation ticket, int rpcTimeout,
-        Configuration conf) throws IOException {
+        RetryPolicy connectionRetryPolicy, Configuration conf) throws IOException {
+
+      if (connectionRetryPolicy == null) {
+        final int max = conf.getInt(
+            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
+            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT);
+        connectionRetryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
+            max, 1, TimeUnit.SECONDS);
+      }
+
       String remotePrincipal = getRemotePrincipal(conf, addr, protocol);
       boolean doPing =
         conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
@@ -1377,8 +1423,7 @@ public class Client {
           rpcTimeout, remotePrincipal,
           conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
               CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT),
-          conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
-              CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT),
+          connectionRetryPolicy,
           conf.getInt(
             CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
             CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT),
@@ -1421,7 +1466,7 @@ public class Client {
         return isEqual(this.address, that.address)
             && this.doPing == that.doPing
             && this.maxIdleTime == that.maxIdleTime
-            && this.maxRetries == that.maxRetries
+            && isEqual(this.connectionRetryPolicy, that.connectionRetryPolicy)
             && this.pingInterval == that.pingInterval
             && isEqual(this.protocol, that.protocol)
             && this.rpcTimeout == that.rpcTimeout
@@ -1434,11 +1479,10 @@ public class Client {
     
     @Override
     public int hashCode() {
-      int result = 1;
+      int result = connectionRetryPolicy.hashCode();
       result = PRIME * result + ((address == null) ? 0 : address.hashCode());
       result = PRIME * result + (doPing ? 1231 : 1237);
       result = PRIME * result + maxIdleTime;
-      result = PRIME * result + maxRetries;
       result = PRIME * result + pingInterval;
       result = PRIME * result + ((protocol == null) ? 0 : protocol.hashCode());
       result = PRIME * result + rpcTimeout;

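When no policy is passed in, the patched getConnectionId(..) falls back to a policy equivalent to this sketch: ipc.client.connect.max.retries attempts with a fixed one-second sleep.

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    public class DefaultConnectRetryExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        int max = conf.getInt(
            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT);
        // The same default the patched Client builds internally.
        RetryPolicy policy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            max, 1, TimeUnit.SECONDS);
        System.out.println(policy);
        // e.g. RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
      }
    }
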
+ 19 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -36,9 +36,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataOutputOutputStream;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
-
 import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
@@ -66,15 +66,24 @@ public class ProtobufRpcEngine implements RpcEngine {
 
   private static final ClientCache CLIENTS = new ClientCache();
 
+  public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
+      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+      SocketFactory factory, int rpcTimeout) throws IOException {
+    return getProxy(protocol, clientVersion, addr, ticket, conf, factory,
+        rpcTimeout, null);
+  }
+
   @Override
   @SuppressWarnings("unchecked")
   public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
       InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
-      SocketFactory factory, int rpcTimeout) throws IOException {
+      SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy
+      ) throws IOException {
 
-    return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(protocol
-        .getClassLoader(), new Class[] { protocol }, new Invoker(protocol,
-        addr, ticket, conf, factory, rpcTimeout)), false);
+    final Invoker invoker = new Invoker(protocol, addr, ticket, conf, factory,
+        rpcTimeout, connectionRetryPolicy);
+    return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(
+        protocol.getClassLoader(), new Class[]{protocol}, invoker), false);
   }
   
   @Override
@@ -97,11 +106,12 @@ public class ProtobufRpcEngine implements RpcEngine {
     private final long clientProtocolVersion;
     private final String protocolName;
 
-    public Invoker(Class<?> protocol, InetSocketAddress addr,
+    private Invoker(Class<?> protocol, InetSocketAddress addr,
         UserGroupInformation ticket, Configuration conf, SocketFactory factory,
-        int rpcTimeout) throws IOException {
-      this(protocol, Client.ConnectionId.getConnectionId(addr, protocol,
-          ticket, rpcTimeout, conf), conf, factory);
+        int rpcTimeout, RetryPolicy connectionRetryPolicy) throws IOException {
+      this(protocol, Client.ConnectionId.getConnectionId(
+          addr, protocol, ticket, rpcTimeout, connectionRetryPolicy, conf),
+          conf, factory);
     }
     
     /**

+ 11 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java

@@ -41,6 +41,7 @@ import org.apache.commons.logging.*;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.io.*;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService;
 import org.apache.hadoop.net.NetUtils;
@@ -326,7 +327,7 @@ public class RPC {
                              long clientVersion,
                              InetSocketAddress addr, Configuration conf,
                              long connTimeout) throws IOException { 
-    return waitForProtocolProxy(protocol, clientVersion, addr, conf, 0, connTimeout);
+    return waitForProtocolProxy(protocol, clientVersion, addr, conf, 0, null, connTimeout);
   }
   
   /**
@@ -347,7 +348,7 @@ public class RPC {
                              int rpcTimeout,
                              long timeout) throws IOException {
     return waitForProtocolProxy(protocol, clientVersion, addr,
-        conf, rpcTimeout, timeout).getProxy();
+        conf, rpcTimeout, null, timeout).getProxy();
   }
 
   /**
@@ -367,6 +368,7 @@ public class RPC {
                                long clientVersion,
                                InetSocketAddress addr, Configuration conf,
                                int rpcTimeout,
+                               RetryPolicy connectionRetryPolicy,
                                long timeout) throws IOException { 
     long startTime = System.currentTimeMillis();
     IOException ioe;
@@ -374,7 +376,7 @@ public class RPC {
       try {
         return getProtocolProxy(protocol, clientVersion, addr, 
             UserGroupInformation.getCurrentUser(), conf, NetUtils
-            .getDefaultSocketFactory(conf), rpcTimeout);
+            .getDefaultSocketFactory(conf), rpcTimeout, connectionRetryPolicy);
       } catch(ConnectException se) {  // namenode has not been started
         LOG.info("Server at " + addr + " not available yet, Zzzzz...");
         ioe = se;
@@ -463,7 +465,7 @@ public class RPC {
                                 Configuration conf,
                                 SocketFactory factory) throws IOException {
     return getProtocolProxy(
-        protocol, clientVersion, addr, ticket, conf, factory, 0);
+        protocol, clientVersion, addr, ticket, conf, factory, 0, null);
   }
   
   /**
@@ -489,7 +491,7 @@ public class RPC {
                                 SocketFactory factory,
                                 int rpcTimeout) throws IOException {
     return getProtocolProxy(protocol, clientVersion, addr, ticket,
-             conf, factory, rpcTimeout).getProxy();
+             conf, factory, rpcTimeout, null).getProxy();
   }
   
   /**
@@ -512,12 +514,13 @@ public class RPC {
                                 UserGroupInformation ticket,
                                 Configuration conf,
                                 SocketFactory factory,
-                                int rpcTimeout) throws IOException {    
+                                int rpcTimeout,
+                                RetryPolicy connectionRetryPolicy) throws IOException {    
     if (UserGroupInformation.isSecurityEnabled()) {
       SaslRpcServer.init(conf);
     }
-    return getProtocolEngine(protocol,conf).getProxy(protocol,
-        clientVersion, addr, ticket, conf, factory, rpcTimeout);
+    return getProtocolEngine(protocol,conf).getProxy(protocol, clientVersion,
+        addr, ticket, conf, factory, rpcTimeout, connectionRetryPolicy);
   }
 
    /**

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java

@@ -97,8 +97,9 @@ public class RemoteException extends IOException {
     return new RemoteException(attrs.getValue("class"),
         attrs.getValue("message")); 
   }
-  
+
+  @Override
   public String toString() {
-    return className + ": " + getMessage();
+    return getClass().getName() + "(" + className + "): " + getMessage();
   }
 }

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java

@@ -26,6 +26,7 @@ import javax.net.SocketFactory;
 
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
@@ -40,7 +41,8 @@ public interface RpcEngine {
   <T> ProtocolProxy<T> getProxy(Class<T> protocol,
                   long clientVersion, InetSocketAddress addr,
                   UserGroupInformation ticket, Configuration conf,
-                  SocketFactory factory, int rpcTimeout) throws IOException;
+                  SocketFactory factory, int rpcTimeout,
+                  RetryPolicy connectionRetryPolicy) throws IOException;
 
   /** Expert: Make multiple, parallel calls to a set of servers. */
   Object[] call(Method method, Object[][] params, InetSocketAddress[] addrs,

+ 7 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java

@@ -31,6 +31,7 @@ import javax.net.SocketFactory;
 import org.apache.commons.logging.*;
 
 import org.apache.hadoop.io.*;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
 import org.apache.hadoop.ipc.VersionedProtocol;
@@ -259,9 +260,14 @@ public class WritableRpcEngine implements RpcEngine {
   public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
                          InetSocketAddress addr, UserGroupInformation ticket,
                          Configuration conf, SocketFactory factory,
-                         int rpcTimeout)
+                         int rpcTimeout, RetryPolicy connectionRetryPolicy)
     throws IOException {    
 
+    if (connectionRetryPolicy != null) {
+      throw new UnsupportedOperationException(
+          "Not supported: connectionRetryPolicy=" + connectionRetryPolicy);
+    }
+
     T proxy = (T) Proxy.newProxyInstance(protocol.getClassLoader(),
         new Class[] { protocol }, new Invoker(protocol, addr, ticket, conf,
             factory, rpcTimeout));

+ 142 - 29
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.net;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
 import java.util.Random;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -55,8 +56,8 @@ public class NetworkTopology {
   /** InnerNode represents a switch/router of a data center or rack.
    * Different from a leaf node, it has non-null children.
    */
-  private class InnerNode extends NodeBase {
-    private ArrayList<Node> children=new ArrayList<Node>();
+  static class InnerNode extends NodeBase {
+    protected List<Node> children=new ArrayList<Node>();
     private int numOfLeaves;
         
     /** Construct an InnerNode from a path-like string */
@@ -76,7 +77,7 @@ public class NetworkTopology {
     }
         
     /** @return its children */
-    Collection<Node> getChildren() {return children;}
+    List<Node> getChildren() {return children;}
         
     /** @return the number of children this node has */
     int getNumOfChildren() {
@@ -182,7 +183,23 @@ public class NetworkTopology {
         }
       }
     }
-        
+
+    /**
+     * Creates a parent node to be added to the list of children.  
+     * Creates a node using the InnerNode four argument constructor specifying 
+     * the name, location, parent, and level of this node.
+     * 
+     * <p>To be overridden in subclasses for specific InnerNode implementations,
+     * as an alternative to overriding the full {@link #add(Node)} method.
+     * 
+     * @param parentName The name of the parent node
+     * @return A new inner node
+     * @see InnerNode#InnerNode(String, String, InnerNode, int)
+     */
+    protected InnerNode createParentNode(String parentName) {
+      return new InnerNode(parentName, getPath(this), this, this.getLevel()+1);
+    }
+
     /** Remove node <i>n</i> from the subtree of this node
      * @param n node to be deleted 
      * @return true if the node is deleted; false otherwise
@@ -263,7 +280,7 @@ public class NetworkTopology {
      * @param excludedNode an excluded node (can be null)
      * @return
      */
-    private Node getLeaf(int leafIndex, Node excludedNode) {
+    Node getLeaf(int leafIndex, Node excludedNode) {
       int count=0;
       // check if the excluded node a leaf
       boolean isLeaf =
@@ -308,7 +325,21 @@ public class NetworkTopology {
         return null;
       }
     }
-        
+    
+    /**
+     * Determine whether the children are leaves; the default implementation
+     * calls {@link #isRack()}.
+     * <p>To be overridden in subclasses for specific InnerNode implementations,
+     * as an alternative to overriding the full {@link #getLeaf(int, Node)}
+     * method.
+     * 
+     * @return true if the children are leaves, false otherwise
+     */
+    protected boolean areChildrenLeaves() {
+      return isRack();
+    }
+
+    /**
+     * Get number of leaves.
+     */
     int getNumOfLeaves() {
       return numOfLeaves;
     }
@@ -317,18 +348,18 @@ public class NetworkTopology {
   /**
    * the root cluster map
    */
-  InnerNode clusterMap = new InnerNode(InnerNode.ROOT);
+  InnerNode clusterMap;
   /** Depth of all leaf nodes */
   private int depthOfAllLeaves = -1;
   /** rack counter */
-  private int numOfRacks = 0;
+  protected int numOfRacks = 0;
   /** the lock used to manage access */
-  private ReadWriteLock netlock;
-    
+  protected ReadWriteLock netlock = new ReentrantReadWriteLock();
+
   public NetworkTopology() {
-    netlock = new ReentrantReadWriteLock();
+    clusterMap = new InnerNode(InnerNode.ROOT);
   }
-    
+
   /** Add a leaf node
    * Update node counter & rack counter if necessary
    * @param node node to be added; can be null
@@ -344,7 +375,7 @@ public class NetworkTopology {
     }
     netlock.writeLock().lock();
     try {
-      Node rack = getNode(node.getNetworkLocation());
+      Node rack = getNodeForNetworkLocation(node);
       if (rack != null && !(rack instanceof InnerNode)) {
         throw new IllegalArgumentException("Unexpected data node " 
                                            + node.toString() 
@@ -376,7 +407,26 @@ public class NetworkTopology {
       netlock.writeLock().unlock();
     }
   }
-    
+  
+  /**
+   * Return a reference to the node given its string representation.
+   * Default implementation delegates to {@link #getNode(String)}.
+   * 
+   * <p>To be overridden in subclasses for specific NetworkTopology 
+   * implementations, as an alternative to overriding the full
+   * {@link #add(Node)} method.
+   * 
+   * @param node the node whose network location string is used to retrieve
+   *             the corresponding Node object
+   * @return a reference to the node; null if the node is not in the tree
+   * 
+   * @see #add(Node)
+   * @see #getNode(String)
+   */
+  protected Node getNodeForNetworkLocation(Node node) {
+    return getNode(node.getNetworkLocation());
+  }
+  
   /** Remove a node
    * Update node counter and rack counter if necessary
    * @param node node to be removed; can be null
@@ -403,7 +453,7 @@ public class NetworkTopology {
       netlock.writeLock().unlock();
     }
   }
-       
+
   /** Check if the tree contains node <i>node</i>
    * 
    * @param node a node
@@ -443,7 +493,21 @@ public class NetworkTopology {
       netlock.readLock().unlock();
     }
   }
-    
+  
+  /** Given a string representation of a network location, return the rack
+   * it belongs to. The base implementation assumes the location is itself
+   * the rack.
+   * 
+   * <p>To be overridden in subclasses for specific NetworkTopology 
+   * implementations.
+   * @param loc
+   *          a path-like string representation of a network location
+   * @return a rack string
+   */
+  public String getRack(String loc) {
+    return loc;
+  }
+  
   /** @return the total number of racks */
   public int getNumOfRacks() {
     netlock.readLock().lock();
@@ -453,7 +517,7 @@ public class NetworkTopology {
       netlock.readLock().unlock();
     }
   }
-    
+
   /** @return the total number of leaf nodes */
   public int getNumOfLeaves() {
     netlock.readLock().lock();
@@ -463,7 +527,7 @@ public class NetworkTopology {
       netlock.readLock().unlock();
     }
   }
-    
+
   /** Return the distance between two nodes
    * It is assumed that the distance from one node to its parent is 1
    * The distance between two nodes is calculated by summing up their distances
@@ -509,8 +573,8 @@ public class NetworkTopology {
       return Integer.MAX_VALUE;
     }
     return dis+2;
-  } 
-    
+  }
+
   /** Check if two nodes are on the same rack
    * @param node1 one node (can be null)
    * @param node2 another node (can be null)
@@ -525,13 +589,44 @@ public class NetworkTopology {
       
     netlock.readLock().lock();
     try {
-      return node1.getParent()==node2.getParent();
+      return isSameParents(node1, node2);
     } finally {
       netlock.readLock().unlock();
     }
   }
-    
-  final private static Random r = new Random();
+  
+  /**
+   * Check if the network topology is aware of NodeGroup
+   */
+  public boolean isNodeGroupAware() {
+    return false;
+  }
+  
+  /** 
+   * Check if two nodes are on the same node group. Returns false directly
+   * since the base topology is not NodeGroup-aware; to be overridden in
+   * subclasses.
+   */
+  public boolean isOnSameNodeGroup(Node node1, Node node2) {
+    return false;
+  }
+
+  /**
+   * Compare the parents of each node for equality
+   * 
+   * <p>To be overridden in subclasses for specific NetworkTopology 
+   * implementations, as alternative to overriding the full 
+   * {@link #isOnSameRack(Node, Node)} method.
+   * 
+   * @param node1 the first node to compare
+   * @param node2 the second node to compare
+   * @return true if their parents are equal, false otherwise
+   * 
+   * @see #isOnSameRack(Node, Node)
+   */
+  protected boolean isSameParents(Node node1, Node node2) {
+    return node1.getParent()==node2.getParent();
+  }
+
+  final protected static Random r = new Random();
   /** randomly choose one node from <i>scope</i>
    * if scope starts with ~, choose one from the all nodes except for the
    * ones in <i>scope</i>; otherwise, choose one from <i>scope</i>
@@ -550,7 +645,7 @@ public class NetworkTopology {
       netlock.readLock().unlock();
     }
   }
-    
+
   private Node chooseRandom(String scope, String excludedScope){
     if (excludedScope != null) {
       if (scope.startsWith(excludedScope)) {
@@ -579,7 +674,25 @@ public class NetworkTopology {
     int leaveIndex = r.nextInt(numOfDatanodes);
     return innerNode.getLeaf(leaveIndex, node);
   }
-       
+
+  /** Return the leaves in <i>scope</i>
+   * @param scope a path string
+   * @return the leaf nodes under the given scope
+   */
+  public List<Node> getLeaves(String scope) {
+    Node node = getNode(scope);
+    List<Node> leafNodes = new ArrayList<Node>();
+    if (!(node instanceof InnerNode)) {
+      leafNodes.add(node);
+    } else {
+      InnerNode innerNode = (InnerNode) node;
+      for (int i=0;i<innerNode.getNumOfLeaves();i++) {
+        leafNodes.add(innerNode.getLeaf(i, null));
+      }
+    }
+    return leafNodes;
+  }
+
   /** return the number of leaves in <i>scope</i> but not in <i>excludedNodes</i>
    * if scope starts with ~, return the number of nodes that are not
    * in <i>scope</i> and <i>excludedNodes</i>; 
@@ -619,7 +732,7 @@ public class NetworkTopology {
       netlock.readLock().unlock();
     }
   }
-    
+
   /** convert a network tree to a string */
   public String toString() {
     // print the number of racks
@@ -640,13 +753,12 @@ public class NetworkTopology {
     return tree.toString();
   }
 
-  /* swap two array items */
-  static private void swap(Node[] nodes, int i, int j) {
+  /** swap two array items */
+  static protected void swap(Node[] nodes, int i, int j) {
     Node tempNode;
     tempNode = nodes[j];
     nodes[j] = nodes[i];
     nodes[i] = tempNode;
-    
   }
   
   /** Sort nodes array by their distances to <i>reader</i>
@@ -697,4 +809,5 @@ public class NetworkTopology {
       swap(nodes, 0, r.nextInt(nodes.length));
     }
   }
+  
 }
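
A short usage sketch of the refactored topology, assuming the existing NodeBase(name, location) constructor; the host names and locations are made up:

  NetworkTopology topo = new NetworkTopology();
  Node n1 = new NodeBase("h1", "/dc1/rack1");
  Node n2 = new NodeBase("h2", "/dc1/rack1");
  Node n3 = new NodeBase("h3", "/dc1/rack2");
  topo.add(n1); topo.add(n2); topo.add(n3);
  topo.isOnSameRack(n1, n2);    // true: same parent, via isSameParents()
  topo.getDistance(n1, n3);     // 4: two hops up to /dc1, two hops down
  topo.getLeaves("/dc1/rack1"); // [n1, n2], using the new getLeaves()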

+ 398 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java

@@ -0,0 +1,398 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The class extends NetworkTopology to represent a cluster of computers with
+ * a four-layer hierarchical network topology.
+ * In this network topology, leaves represent data nodes (computers) and inner
+ * nodes represent switches/routers that manage traffic in/out of data centers,
+ * racks or physical hosts (with virtual switches).
+ * 
+ * @see NetworkTopology
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public class NetworkTopologyWithNodeGroup extends NetworkTopology {
+
+  public final static String DEFAULT_NODEGROUP = "/default-nodegroup";
+
+  public NetworkTopologyWithNodeGroup() {
+    clusterMap = new InnerNodeWithNodeGroup(InnerNode.ROOT);
+  }
+
+  @Override
+  protected Node getNodeForNetworkLocation(Node node) {
+    // if the node only carries the default rack info, append the default
+    // nodegroup info
+    if (NetworkTopology.DEFAULT_RACK.equals(node.getNetworkLocation())) {
+      node.setNetworkLocation(node.getNetworkLocation()
+          + DEFAULT_NODEGROUP);
+    }
+    Node nodeGroup = getNode(node.getNetworkLocation());
+    if (nodeGroup == null) {
+      nodeGroup = new InnerNode(node.getNetworkLocation());
+    }
+    return getNode(nodeGroup.getNetworkLocation());
+  }
+
+  @Override
+  public String getRack(String loc) {
+    netlock.readLock().lock();
+    try {
+      loc = InnerNode.normalize(loc);
+      Node locNode = getNode(loc);
+      if (locNode instanceof InnerNodeWithNodeGroup) {
+        InnerNodeWithNodeGroup node = (InnerNodeWithNodeGroup) locNode;
+        if (node.isRack()) {
+          return loc;
+        } else if (node.isNodeGroup()) {
+          return node.getNetworkLocation();
+        } else {
+          // may be a data center
+          return null;
+        }
+      } else {
+        // not in cluster map, don't handle it
+        return loc;
+      }
+    } finally {
+      netlock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Given a string representation of a network location, return the node
+   * group it belongs to.
+   * 
+   * @param loc
+   *            a path-like string representation of a network location
+   * @return a node group string
+   */
+  public String getNodeGroup(String loc) {
+    netlock.readLock().lock();
+    try {
+      loc = InnerNode.normalize(loc);
+      Node locNode = getNode(loc);
+      if (locNode instanceof InnerNodeWithNodeGroup) {
+        InnerNodeWithNodeGroup node = (InnerNodeWithNodeGroup) locNode;
+        if (node.isNodeGroup()) {
+          return loc;
+        } else if (node.isRack()) {
+          // the node group of a rack is not defined
+          return null;
+        } else {
+          // may be a leaf node
+          return getNodeGroup(node.getNetworkLocation());
+        }
+      } else {
+        // not in cluster map, don't handle it
+        return loc;
+      }
+    } finally {
+      netlock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public boolean isOnSameRack(Node node1, Node node2) {
+    if (node1 == null || node2 == null ||
+        node1.getParent() == null || node2.getParent() == null) {
+      return false;
+    }
+      
+    netlock.readLock().lock();
+    try {
+      return isSameParents(node1.getParent(), node2.getParent());
+    } finally {
+      netlock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Check if two nodes are on the same node group (hypervisor). The
+   * assumption here is that both nodes are leaf nodes.
+   * 
+   * @param node1
+   *            one node (can be null)
+   * @param node2
+   *            another node (can be null)
+   * @return true if node1 and node2 are on the same node group; false
+   *         otherwise, including when either node1 or node2 is null
+   */
+  @Override
+  public boolean isOnSameNodeGroup(Node node1, Node node2) {
+    if (node1 == null || node2 == null) {
+      return false;
+    }
+    netlock.readLock().lock();
+    try {
+      return isSameParents(node1, node2);
+    } finally {
+      netlock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Check if the network topology is aware of NodeGroup
+   */
+  @Override
+  public boolean isNodeGroupAware() {
+    return true;
+  }
+
+  /** Add a leaf node
+   * Update node counter & rack counter if necessary
+   * @param node node to be added; can be null
+   * @exception IllegalArgumentException if the node is added under a leaf,
+   *                                     or the node to be added is not a leaf
+   */
+  @Override
+  public void add(Node node) {
+    if (node==null) return;
+    if (node instanceof InnerNode) {
+      throw new IllegalArgumentException(
+        "Not allowed to add an inner node: "+NodeBase.getPath(node));
+    }
+    netlock.writeLock().lock();
+    try {
+      Node rack = null;
+
+      // if the node only carries the default rack info, append the default 
+      // nodegroup info
+      if (NetworkTopology.DEFAULT_RACK.equals(node.getNetworkLocation())) {
+        node.setNetworkLocation(node.getNetworkLocation() + 
+            NetworkTopologyWithNodeGroup.DEFAULT_NODEGROUP);
+      }
+      Node nodeGroup = getNode(node.getNetworkLocation());
+      if (nodeGroup == null) {
+        nodeGroup = new InnerNodeWithNodeGroup(node.getNetworkLocation());
+      }
+      rack = getNode(nodeGroup.getNetworkLocation());
+
+      if (rack != null && !(rack instanceof InnerNode)) {
+        throw new IllegalArgumentException("Unexpected data node " 
+            + node.toString() 
+            + " at an illegal network location");
+      }
+      if (clusterMap.add(node)) {
+        LOG.info("Adding a new node: " + NodeBase.getPath(node));
+        if (rack == null) {
+          // We only track rack number here
+          numOfRacks++;
+        }
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("NetworkTopology became:\n" + this.toString());
+      }
+    } finally {
+      netlock.writeLock().unlock();
+    }
+  }
+
+  /** Remove a node
+   * Update node counter and rack counter if necessary
+   * @param node node to be removed; can be null
+   */
+  @Override
+  public void remove(Node node) {
+    if (node==null) return;
+    if (node instanceof InnerNode) {
+      throw new IllegalArgumentException(
+          "Not allowed to remove an inner node: "+NodeBase.getPath(node));
+    }
+    LOG.info("Removing a node: "+NodeBase.getPath(node));
+    netlock.writeLock().lock();
+    try {
+      if (clusterMap.remove(node)) {
+        Node nodeGroup = getNode(node.getNetworkLocation());
+        if (nodeGroup == null) {
+          nodeGroup = new InnerNode(node.getNetworkLocation());
+        }
+        InnerNode rack = (InnerNode)getNode(nodeGroup.getNetworkLocation());
+        if (rack == null) {
+          numOfRacks--;
+        }
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("NetworkTopology became:\n" + this.toString());
+      }
+    } finally {
+      netlock.writeLock().unlock();
+    }
+  }
+
+  /** Sort nodes array by their distances to <i>reader</i>
+   * It linearly scans the array, if a local node is found, swap it with
+   * the first element of the array.
+   * If a local node group node is found, swap it with the first element 
+   * following the local node.
+   * If a local rack node is found, swap it with the first element following
+   * the local node group node.
+   * If neither a local node, a local node group node, nor a local rack node
+   * is found, put a random replica location at position 0.
+   * The rest of the nodes are left untouched.
+   * @param reader the node that wishes to read a block from one of the nodes
+   * @param nodes the list of nodes containing data for the reader
+   */
+  @Override
+  public void pseudoSortByDistance(Node reader, Node[] nodes) {
+
+    if (reader != null && !this.contains(reader)) {
+      // if reader is not a datanode (not in NetworkTopology tree), we will 
+      // replace this reader with a sibling leaf node in tree.
+      Node nodeGroup = getNode(reader.getNetworkLocation());
+      if (nodeGroup != null && nodeGroup instanceof InnerNode) {
+        InnerNode parentNode = (InnerNode) nodeGroup;
+        // replace reader with the first child of its parent in the tree
+        reader = parentNode.getLeaf(0, null);
+      } else {
+        return;
+      }
+    }
+    int tempIndex = 0;
+    int localRackNode = -1;
+    int localNodeGroupNode = -1;
+    if (reader != null) {  
+      //scan the array to find the local node & local rack node
+      for (int i = 0; i < nodes.length; i++) {
+        if (tempIndex == 0 && reader == nodes[i]) { //local node
+          //swap the local node and the node at position 0
+          if (i != 0) {
+            swap(nodes, tempIndex, i);
+          }
+          tempIndex=1;
+
+          if (localRackNode != -1 && (localNodeGroupNode !=-1)) {
+            if (localRackNode == 0) {
+              localRackNode = i;
+            }
+            if (localNodeGroupNode == 0) {
+              localNodeGroupNode = i;
+            }
+            break;
+          }
+        } else if (localNodeGroupNode == -1 && isOnSameNodeGroup(reader, 
+            nodes[i])) {
+          //local node group
+          localNodeGroupNode = i;
+          // node local and rack local are already found
+          if(tempIndex != 0 && localRackNode != -1) break;
+        } else if (localRackNode == -1 && isOnSameRack(reader, nodes[i])) {
+          localRackNode = i;
+          if (tempIndex != 0 && localNodeGroupNode != -1) break;
+        }
+      }
+
+      // swap the local nodegroup node and the node at position tempIndex
+      if(localNodeGroupNode != -1 && localNodeGroupNode != tempIndex) {
+        swap(nodes, tempIndex, localNodeGroupNode);
+        if (localRackNode == tempIndex) {
+          localRackNode = localNodeGroupNode;
+        }
+        tempIndex++;
+      }
+
+      // swap the local rack node and the node at position tempIndex
+      if(localRackNode != -1 && localRackNode != tempIndex) {
+        swap(nodes, tempIndex, localRackNode);
+        tempIndex++;
+      }
+    }
+
+    // put a random node at position 0 if there is not a local/local-nodegroup/
+    // local-rack node
+    if (tempIndex == 0 && localNodeGroupNode == -1 && localRackNode == -1
+        && nodes.length != 0) {
+      swap(nodes, 0, r.nextInt(nodes.length));
+    }
+  }
+
+  /** InnerNodeWithNodeGroup represents a switch/router of a data center, rack
+   * or physical host. Different from a leaf node, it has non-null children.
+   */
+  static class InnerNodeWithNodeGroup extends InnerNode {
+    public InnerNodeWithNodeGroup(String name, String location, 
+        InnerNode parent, int level) {
+      super(name, location, parent, level);
+    }
+
+    public InnerNodeWithNodeGroup(String name, String location) {
+      super(name, location);
+    }
+
+    public InnerNodeWithNodeGroup(String path) {
+      super(path);
+    }
+
+    @Override
+    boolean isRack() {
+      // an inner node with no children counts as a node group, not a rack
+      if (getChildren().isEmpty()) {
+        return false;
+      }
+
+      Node firstChild = children.get(0);
+
+      if (firstChild instanceof InnerNode) {
+        Node firstGrandChild = (((InnerNode) firstChild).children).get(0);
+        if (firstGrandChild instanceof InnerNode) {
+          // it is datacenter
+          return false;
+        } else {
+          return true;
+        }
+      }
+      return false;
+    }
+
+    /**
+     * Determine whether this node represents a node group.
+     * 
+     * @return true if it has no children or its children are not InnerNodes
+     */
+    boolean isNodeGroup() {
+      if (children.isEmpty()) {
+        return true;
+      }
+      Node firstChild = children.get(0);
+      if (firstChild instanceof InnerNode) {
+        // it is rack or datacenter
+        return false;
+      }
+      return true;
+    }
+
+    @Override
+    protected InnerNode createParentNode(String parentName) {
+      return new InnerNodeWithNodeGroup(parentName, getPath(this), this,
+          this.getLevel() + 1);
+    }
+
+    @Override
+    protected boolean areChildrenLeaves() {
+      return isNodeGroup();
+    }
+  }
+}
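
A sketch of the four-layer topology in use; the locations (/<datacenter>/<rack>/<nodegroup>) and host names are made up:

  NetworkTopology topo = new NetworkTopologyWithNodeGroup();
  Node n1 = new NodeBase("h1", "/dc1/rack1/ng1");
  Node n2 = new NodeBase("h2", "/dc1/rack1/ng1");
  Node n3 = new NodeBase("h3", "/dc1/rack1/ng2");
  topo.add(n1); topo.add(n2); topo.add(n3);
  topo.isOnSameNodeGroup(n1, n2); // true: same hypervisor
  topo.isOnSameNodeGroup(n1, n3); // false: different node groups
  topo.isOnSameRack(n1, n3);      // true: compares the nodes' grandparents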

+ 20 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java

@@ -31,6 +31,8 @@ import org.apache.hadoop.security.HadoopKerberosName;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.TokenIdentifier;
 
+import com.google.common.annotations.VisibleForTesting;
+
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public abstract class AbstractDelegationTokenIdentifier 
@@ -173,16 +175,17 @@ extends TokenIdentifier {
 	throw new IOException("Unknown version of delegation token " + 
                               version);
     }
-    owner.readFields(in);
-    renewer.readFields(in);
-    realUser.readFields(in);
+    owner.readFields(in, Text.DEFAULT_MAX_LEN);
+    renewer.readFields(in, Text.DEFAULT_MAX_LEN);
+    realUser.readFields(in, Text.DEFAULT_MAX_LEN);
     issueDate = WritableUtils.readVLong(in);
     maxDate = WritableUtils.readVLong(in);
     sequenceNumber = WritableUtils.readVInt(in);
     masterKeyId = WritableUtils.readVInt(in);
   }
 
-  public void write(DataOutput out) throws IOException {
+  @VisibleForTesting
+  void writeImpl(DataOutput out) throws IOException {
     out.writeByte(VERSION);
     owner.write(out);
     renewer.write(out);
@@ -193,6 +196,19 @@ extends TokenIdentifier {
     WritableUtils.writeVInt(out, masterKeyId);
   }
   
+  public void write(DataOutput out) throws IOException {
+    if (owner.getLength() > Text.DEFAULT_MAX_LEN) {
+      throw new IOException("owner is too long to be serialized!");
+    }
+    if (renewer.getLength() > Text.DEFAULT_MAX_LEN) {
+      throw new IOException("renewer is too long to be serialized!");
+    }
+    if (realUser.getLength() > Text.DEFAULT_MAX_LEN) {
+      throw new IOException("realuser is too long to be serialized!");
+    }
+    writeImpl(out);
+  }
+  
   public String toString() {
     StringBuilder buffer = new StringBuilder();
     buffer
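
A sketch of what the new length guard buys; MyTokenIdent stands in for any concrete AbstractDelegationTokenIdentifier subclass and is hypothetical:

  Text hugeOwner = new Text(new byte[Text.DEFAULT_MAX_LEN + 1]);
  AbstractDelegationTokenIdentifier id =
      new MyTokenIdent(hugeOwner, new Text("renewer"), new Text("realUser"));
  DataOutputBuffer out = new DataOutputBuffer();
  try {
    id.write(out); // throws IOException: owner exceeds Text.DEFAULT_MAX_LEN
  } catch (IOException expected) {
    // nothing was serialized; readFields() is bounded the same way
  }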

+ 11 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java

@@ -30,7 +30,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  * The <code>ShutdownHookManager</code> enables running shutdownHook
- * in a determistic order, higher priority first.
+ * in a deterministic order, higher priority first.
  * <p/>
  * The JVM runs ShutdownHooks in a non-deterministic order or in parallel.
  * This class registers a single JVM shutdownHook and run all the
@@ -169,7 +169,7 @@ public class ShutdownHookManager {
   }
 
   /**
-   * Indicates if a shutdownHook is registered or nt.
+   * Indicates if a shutdownHook is registered or not.
    *
    * @param shutdownHook shutdownHook to check if registered.
   * @return TRUE/FALSE depending on whether the shutdownHook is registered.
@@ -177,5 +177,14 @@ public class ShutdownHookManager {
   public boolean hasShutdownHook(Runnable shutdownHook) {
     return hooks.contains(new HookEntry(shutdownHook, 0));
   }
+  
+  /**
+   * Indicates if shutdown is in progress or not.
+   * 
+   * @return TRUE if the shutdown is in progress, otherwise FALSE.
+   */
+  public boolean isShutdownInProgress() {
+    return shutdownInProgress.get();
+  }
 
 }
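
A small sketch of the new query; submitBackgroundTask() is a hypothetical application method:

  // Avoid scheduling new work once JVM shutdown has begun.
  ShutdownHookManager mgr = ShutdownHookManager.get();
  if (!mgr.isShutdownInProgress()) {
    submitBackgroundTask();
  }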

+ 0 - 42
hadoop-common-project/hadoop-common/src/main/native/.autom4te.cfg

@@ -1,42 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# autom4te configuration for hadoop-native library
-#
-
-begin-language: "Autoheader-preselections"
-args: --no-cache 
-end-language: "Autoheader-preselections"
-
-begin-language: "Automake-preselections"
-args: --no-cache 
-end-language: "Automake-preselections"
-
-begin-language: "Autoreconf-preselections"
-args: --no-cache 
-end-language: "Autoreconf-preselections"
-
-begin-language: "Autoconf-without-aclocal-m4"
-args: --no-cache 
-end-language: "Autoconf-without-aclocal-m4"
-
-begin-language: "Autoconf"
-args: --no-cache 
-end-language: "Autoconf"
-

+ 0 - 66
hadoop-common-project/hadoop-common/src/main/native/Makefile.am

@@ -1,66 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# Notes: 
-# 1. This makefile is designed to do the actual builds in $(HADOOP_PREFIX)/build/native/${os.name}-${os-arch}.
-# 2. This makefile depends on the following environment variables to function correctly:
-#    * HADOOP_NATIVE_SRCDIR 
-#    * JAVA_HOME
-#    * JVM_DATA_MODEL
-#    * OS_NAME
-#    * OS_ARCH 
-#    All these are setup by build.xml. 
-#
-
-# Export $(PLATFORM) to prevent proliferation of sub-shells
-export PLATFORM = $(shell echo $$OS_NAME | tr [A-Z] [a-z])
-
-ACLOCAL_AMFLAGS = -I m4 
-AM_CPPFLAGS = @JNI_CPPFLAGS@ -I$(HADOOP_NATIVE_SRCDIR)/src \
-              -I$(HADOOP_NATIVE_SRCDIR)/javah
-AM_LDFLAGS = @JNI_LDFLAGS@
-AM_CFLAGS = -g -Wall -fPIC -O2
-if SPECIFY_DATA_MODEL
-AM_LDFLAGS += -m$(JVM_DATA_MODEL)
-AM_CFLAGS += -m$(JVM_DATA_MODEL)
-endif
-
-lib_LTLIBRARIES = libhadoop.la
-libhadoop_la_SOURCES = src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c \
-                       src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c \
-                       src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c \
-                       src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c \
-                       src/org/apache/hadoop/io/compress/lz4/lz4.c \
-                       src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c \
-                       src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c \
-                       src/org/apache/hadoop/security/getGroup.c \
-                       src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c \
-                       src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c \
-                       src/org/apache/hadoop/io/nativeio/file_descriptor.c \
-                       src/org/apache/hadoop/io/nativeio/errno_enum.c \
-                       src/org/apache/hadoop/io/nativeio/NativeIO.c \
-                       src/org/apache/hadoop/util/NativeCrc32.c \
-                       src/org/apache/hadoop/util/bulk_crc32.c
-
-libhadoop_la_LDFLAGS = -version-info 1:0:0 $(AM_LDFLAGS)
-libhadoop_la_LIBADD = -ldl -ljvm
-
-#
-#vim: sw=4: ts=4: noet
-#

+ 0 - 28
hadoop-common-project/hadoop-common/src/main/native/acinclude.m4

@@ -1,28 +0,0 @@
-# AC_COMPUTE_NEEDED_DSO(LIBRARY, TEST_PROGRAM, PREPROC_SYMBOL)
-# --------------------------------------------------
-# Compute the 'actual' dynamic-library used 
-# for LIBRARY and set it to PREPROC_SYMBOL
-AC_DEFUN([AC_COMPUTE_NEEDED_DSO],
-[
-AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_libname_$1,
-  [
-  echo '$2' > conftest.c
-  if test -z "`${CC} ${LDFLAGS} -o conftest conftest.c -l$1 2>&1`"; then
-    dnl Try objdump and ldd in that order to get the dynamic library
-    if test ! -z "`which objdump | grep -v 'no objdump'`"; then
-      ac_cv_libname_$1="`objdump -p conftest | grep NEEDED | grep $1 | sed 's/\W*NEEDED\W*\(.*\)\W*$/\"\1\"/'`"
-    elif test ! -z "`which ldd | grep -v 'no ldd'`"; then
-      ac_cv_libname_$1="`ldd conftest | grep $1 | sed 's/^[[[^A-Za-z0-9]]]*\([[[A-Za-z0-9\.]]]*\)[[[^A-Za-z0-9]]]*=>.*$/\"\1\"/'`"
-    elif test ! -z "`which otool | grep -v 'no otool'`"; then
-      ac_cv_libname_$1=\"`otool -L conftest | grep $1 | sed -e 's/^[	 ]*//' -e 's/ .*//' -e 's/.*\/\(.*\)$/\1/'`\";
-    else
-      AC_MSG_ERROR(Can't find either 'objdump' or 'ldd' or 'otool' to compute the dynamic library for '-l$1')
-    fi
-  else
-    ac_cv_libname_$1=libnotfound.so
-  fi
-  rm -f conftest*
-  ]
-)
-AC_DEFINE_UNQUOTED($3, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1'])
-])# AC_COMPUTE_NEEDED_DSO

+ 0 - 130
hadoop-common-project/hadoop-common/src/main/native/configure.ac

@@ -1,130 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# configure.ac for hadoop native code. 
-#
-
-# Notes: 
-# 1. This configure.ac depends on the following environment variables to function correctly:
-#    * HADOOP_NATIVE_SRCDIR 
-#    * JAVA_HOME
-#    * JVM_DATA_MODEL
-#    * OS_NAME
-#    * OS_ARCH 
-#    All these are setup by build.xml. 
-
-#                                               -*- Autoconf -*-
-# Process this file with autoconf to produce a configure script.
-#
-
-AC_PREREQ(2.59)
-AC_INIT(src/org_apache_hadoop.h)
-AC_CONFIG_SRCDIR([src/org_apache_hadoop.h])
-AC_CONFIG_AUX_DIR([config])
-AC_CONFIG_MACRO_DIR([m4])
-AC_CONFIG_HEADER([config.h])
-AC_SYS_LARGEFILE
-AC_GNU_SOURCE
-
-AM_INIT_AUTOMAKE(hadoop,1.0.0)
-
-# Checks for programs.
-AC_PROG_CC
-AC_PROG_LIBTOOL
-
-# Checks for libraries.
-dnl Check for '-ldl'
-AC_CHECK_LIB([dl], [dlopen])
-
-dnl Check for '-ljvm'
-JNI_LDFLAGS=""
-if test $JAVA_HOME != ""
-then
-  JNI_LDFLAGS="-L$JAVA_HOME/jre/lib/$OS_ARCH/server"
-  JVMSOPATH=`find $JAVA_HOME/jre/ -name libjvm.so | head -n 1`
-  JNI_LDFLAGS="$JNI_LDFLAGS -L`dirname $JVMSOPATH`"
-fi
-LDFLAGS="$LDFLAGS $JNI_LDFLAGS"
-AC_CHECK_LIB([jvm], [JNI_GetCreatedJavaVMs])
-AC_SUBST([JNI_LDFLAGS])
-
-# Checks for header files.
-dnl Check for Ansi C headers
-AC_HEADER_STDC
-
-dnl Check for other standard C headers
-AC_CHECK_HEADERS([stdio.h stddef.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.))
-
-dnl Check for JNI headers
-JNI_CPPFLAGS=""
-if test $JAVA_HOME != ""
-then
-  for dir in `find $JAVA_HOME/include -follow -type d`
-  do
-    JNI_CPPFLAGS="$JNI_CPPFLAGS -I$dir"
-  done
-fi
-cppflags_bak=$CPPFLAGS
-CPPFLAGS="$CPPFLAGS $JNI_CPPFLAGS"
-AC_CHECK_HEADERS([jni.h], [], AC_MSG_ERROR([Native java headers not found. Is \$JAVA_HOME set correctly?]))
-CPPFLAGS=$cppflags_bak
-AC_SUBST([JNI_CPPFLAGS])
-
-dnl Check for zlib headers
-AC_CHECK_HEADERS([zlib.h zconf.h],
-  AC_COMPUTE_NEEDED_DSO(z,
-    [#include "zlib.h"
-    int main(int argc, char **argv){zlibVersion();return 0;}],
-    HADOOP_ZLIB_LIBRARY),
-  AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.))
-
-dnl Check for snappy headers
-AC_CHECK_HEADERS([snappy-c.h],
-  AC_COMPUTE_NEEDED_DSO(snappy,
-    [#include "snappy-c.h"
-    int main(int argc, char **argv){snappy_compress(0,0,0,0);return 0;}],
-    HADOOP_SNAPPY_LIBRARY),
-  AC_MSG_WARN(Snappy headers were not found... building without snappy.))
-
-dnl Check for headers needed by the native Group resolution implementation
-AC_CHECK_HEADERS([fcntl.h stdlib.h string.h unistd.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.))
-
-dnl check for posix_fadvise
-AC_CHECK_HEADERS(fcntl.h, [AC_CHECK_FUNCS(posix_fadvise)])
-
-dnl check for sync_file_range
-AC_CHECK_HEADERS(fcntl.h, [AC_CHECK_FUNCS(sync_file_range)])
-
-# Checks for typedefs, structures, and compiler characteristics.
-AC_C_CONST
-
-# Checks for library functions.
-AC_CHECK_FUNCS([memset])
-
-# Check for nonstandard STRERROR_R
-AC_FUNC_STRERROR_R
-
-AM_CONDITIONAL([SPECIFY_DATA_MODEL], [case $host_cpu in arm*) false;; *) true;; esac])
-
-AC_CONFIG_FILES([Makefile])
-AC_OUTPUT
-
-#
-#vim: sw=2: ts=2: noet
-#

+ 0 - 47
hadoop-common-project/hadoop-common/src/main/native/lib/Makefile.am

@@ -1,47 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# Makefile template for building libhadoop.so 
-#
-
-#
-# Notes: 
-# 1. This makefile is designed to do the actual builds in $(HADOOP_PREFIX)/build/native/${os.name}-${os.arch}/lib 
-# 2. This makefile depends on the following environment variables to function correctly:
-#    * HADOOP_NATIVE_SRCDIR 
-#    * JAVA_HOME
-#    * OS_ARCH 
-#    All these are setup by build.xml and/or the top-level makefile.
-#
-
-# Add .lo files in $(SUBDIRS) to construct libhadoop.so
-HADOOP_OBJS = $(foreach path,$(addprefix ../,$(SUBDIRS)),$(wildcard $(path)/*.lo))
-AM_LDFLAGS = @JNI_LDFLAGS@
-if SPECIFY_DATA_MODEL
-AM_LDFLAGS += -m$(JVM_DATA_MODEL)
-endif
-
-lib_LTLIBRARIES = libhadoop.la
-libhadoop_la_SOURCES = 
-libhadoop_la_LDFLAGS = -version-info 1:0:0 $(AM_LDFLAGS)
-libhadoop_la_LIBADD = $(HADOOP_OBJS) -ldl -ljvm
-
-#
-#vim: sw=4: ts=4: noet
-#

+ 1 - 4
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c

@@ -16,10 +16,7 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
+#include "config.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_compress_lz4_Lz4Compressor.h"
 

+ 1 - 4
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c

@@ -16,10 +16,7 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
+#include "config.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_compress_lz4_Lz4Decompressor.h"
 

+ 5 - 31
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c

@@ -16,36 +16,12 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HADOOP_SNAPPY_LIBRARY
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
-  #error 'stdio.h not found'
-#endif
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
-  #error 'stdlib.h not found'
-#endif
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
-  #error 'string.h not found'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error 'dlfcn.h not found'
-#endif
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
+#include "config.h"
 #include "org_apache_hadoop_io_compress_snappy.h"
 #include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
 
@@ -123,5 +99,3 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
 
   return (jint)compressed_direct_buf_len;
 }
-
-#endif //define HADOOP_SNAPPY_LIBRARY

+ 5 - 31
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c

@@ -16,36 +16,12 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HADOOP_SNAPPY_LIBRARY
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
-  #error 'stdio.h not found'
-#endif
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
-  #error 'stdlib.h not found'
-#endif
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
-  #error 'string.h not found'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error 'dlfcn.h not found'
-#endif
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
+#include "config.h"
 #include "org_apache_hadoop_io_compress_snappy.h"
 #include "org_apache_hadoop_io_compress_snappy_SnappyDecompressor.h"
 
@@ -127,5 +103,3 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompres
 
   return (jint)uncompressed_direct_buf_len;
 }
-
-#endif //define HADOOP_SNAPPY_LIBRARY

+ 6 - 35
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h

@@ -17,42 +17,13 @@
  */
 
 
-#if !defined ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
+#ifndef ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
 #define ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
 
-
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HADOOP_SNAPPY_LIBRARY
-
-  #if defined HAVE_STDDEF_H
-    #include <stddef.h>
-  #else
-    #error 'stddef.h not found'
-  #endif
-
-  #if defined HAVE_SNAPPY_C_H
-    #include <snappy-c.h>
-  #else
-    #error 'Please install snappy-development packages for your platform.'
-  #endif
-
-  #if defined HAVE_DLFCN_H
-    #include <dlfcn.h>
-  #else
-    #error "dlfcn.h not found"
-  #endif
-
-  #if defined HAVE_JNI_H
-    #include <jni.h>
-  #else
-    #error 'jni.h not found'
-  #endif
-
-  #include "org_apache_hadoop.h"
-
-#endif //define HADOOP_SNAPPY_LIBRARY
+#include "org_apache_hadoop.h"
+#include <dlfcn.h>
+#include <jni.h>
+#include <snappy-c.h>
+#include <stddef.h>
 
 #endif //ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H

+ 0 - 53
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/Makefile.am

@@ -1,53 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# Makefile template for building native 'zlib' for hadoop.
-#
-
-#
-# Notes: 
-# 1. This makefile is designed to do the actual builds in $(HADOOP_PREFIX)/build/native/${os.name}-${os.arch}/$(subdir) .
-# 2. This makefile depends on the following environment variables to function correctly:
-#    * HADOOP_NATIVE_SRCDIR 
-#    * JAVA_HOME
-#    * JVM_DATA_MODEL
-#    * OS_ARCH 
-#    * PLATFORM
-#    All these are setup by build.xml and/or the top-level makefile.
-# 3. The creation of requisite jni headers/stubs are also done by build.xml and they are
-#    assumed to be in $(HADOOP_PREFIX)/build/native/src/org/apache/hadoop/io/compress/zlib.
-#
-
-# The 'vpath directive' to locate the actual source files 
-vpath %.c $(HADOOP_NATIVE_SRCDIR)/$(subdir)
-
-AM_CPPFLAGS = @JNI_CPPFLAGS@ -I$(HADOOP_NATIVE_SRCDIR)/src
-AM_LDFLAGS = @JNI_LDFLAGS@
-AM_CFLAGS = -g -Wall -fPIC -O2
-if SPECIFY_DATA_MODEL
-AM_CFLAGS += -m$(JVM_DATA_MODEL)
-endif
-
-noinst_LTLIBRARIES = libnativezlib.la
-libnativezlib_la_SOURCES = ZlibCompressor.c ZlibDecompressor.c
-libnativezlib_la_LIBADD = -ldl -ljvm
-
-#
-#vim: sw=4: ts=4: noet
-#

+ 5 - 27
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c

@@ -16,34 +16,12 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
-  #error 'stdio.h not found'
-#endif  
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
-  #error 'stdlib.h not found'
-#endif  
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
-  #error 'string.h not found'
-#endif  
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error 'dlfcn.h not found'
-#endif  
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
+#include "config.h"
 #include "org_apache_hadoop_io_compress_zlib.h"
 #include "org_apache_hadoop_io_compress_zlib_ZlibCompressor.h"
 

+ 5 - 27
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c

@@ -16,34 +16,12 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
-  #error 'stdio.h not found'
-#endif  
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
-  #error 'stdlib.h not found'
-#endif  
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
-  #error 'string.h not found'
-#endif  
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error 'dlfcn.h not found'
-#endif  
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
+#include "config.h"
 #include "org_apache_hadoop_io_compress_zlib.h"
 #include "org_apache_hadoop_io_compress_zlib_ZlibDecompressor.h"
 

+ 6 - 33
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h

@@ -19,40 +19,13 @@
 #if !defined ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H
 #define ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HAVE_STDDEF_H
-  #include <stddef.h>
-#else
-  #error 'stddef.h not found'
-#endif
-    
-#if defined HAVE_ZLIB_H
-  #include <zlib.h>
-#else
-  #error 'Please install zlib-development packages for your platform.'
-#endif
-    
-#if defined HAVE_ZCONF_H
-  #include <zconf.h>
-#else
-  #error 'Please install zlib-development packages for your platform.'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error "dlfcn.h not found"
-#endif  
-
-#if defined HAVE_JNI_H    
-  #include <jni.h>
-#else
-  #error 'jni.h not found'
-#endif
+#include <dlfcn.h>
+#include <jni.h>
+#include <stddef.h>
+#include <zconf.h>
+#include <zlib.h>
 
+#include "config.h"
 #include "org_apache_hadoop.h"
 
 /* A helper macro to convert the java 'stream-handle' to a z_stream pointer. */

+ 1 - 3
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c

@@ -16,9 +16,6 @@
  * limitations under the License.
  */
 
-// get the autoconf settings
-#include "config.h"
-
 #include <assert.h>
 #include <errno.h>
 #include <fcntl.h>
@@ -32,6 +29,7 @@
 #include <sys/syscall.h>
 #include <unistd.h>
 
+#include "config.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_nativeio_NativeIO.h"
 #include "file_descriptor.h"

+ 1 - 3
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c

@@ -16,9 +16,6 @@
  * limitations under the License.
  */
 
-// get the autoconf settings
-#include "config.h"
-
 #include <arpa/inet.h>
 #include <assert.h>
 #include <stdlib.h>
@@ -26,6 +23,7 @@
 #include <string.h>
 #include <unistd.h>
 
+#include "config.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_util_NativeCrc32.h"
 #include "gcc_optimizations.h"

+ 3 - 14
hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h

@@ -24,21 +24,10 @@
 #if !defined ORG_APACHE_HADOOP_H
 #define ORG_APACHE_HADOOP_H
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error "dlfcn.h not found"
-#endif  
+#include <dlfcn.h>
+#include <jni.h>
 
-#if defined HAVE_JNI_H    
-  #include <jni.h>
-#else
-  #error 'jni.h not found'
-#endif
+#include "config.h"
 
 /* A helper macro to 'throw' a java exception. */ 
 #define THROW(env, exception_name, message) \

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh

@@ -65,7 +65,7 @@ export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"
 export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
 
 # Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
 
 # Where log files are stored in the secure data environment.
 export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}

+ 9 - 3
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -599,10 +599,9 @@
   </description>
 </property>
 
-<!-- Rack Configuration -->
-
+<!-- Topology Configuration -->
 <property>
-	<name>net.topology.node.switch.mapping.impl</name>
+  <name>net.topology.node.switch.mapping.impl</name>
   <value>org.apache.hadoop.net.ScriptBasedMapping</value>
   <description> The default implementation of the DNSToSwitchMapping. It
     invokes a script specified in net.topology.script.file.name to resolve
@@ -611,6 +610,13 @@
   </description>
 </property>
 
+<property>
+  <name>net.topology.impl</name>
+  <value>org.apache.hadoop.net.NetworkTopology</value>
+  <description> The default implementation of NetworkTopology, which is the
+  classic three-layer one.
+  </description>
+</property>
+
 <property>
   <name>net.topology.script.file.name</name>
   <value></value>
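
The new net.topology.impl key makes the topology class pluggable. A sketch of how a consumer might honor it; the reading side shown here is illustrative, not code from this patch:

  Configuration conf = new Configuration();
  conf.set("net.topology.impl",
      "org.apache.hadoop.net.NetworkTopologyWithNodeGroup");
  Class<? extends NetworkTopology> clazz = conf.getClass(
      "net.topology.impl", NetworkTopology.class, NetworkTopology.class);
  NetworkTopology topo = ReflectionUtils.newInstance(clazz, conf);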

+ 20 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java

@@ -663,6 +663,26 @@ public class TestConfiguration extends TestCase {
                  conf.getPattern("test.pattern3", defaultPattern).pattern());
   }
 
+  public void testPropertySource() throws IOException {
+    out = new BufferedWriter(new FileWriter(CONFIG));
+    startConfig();
+    appendProperty("test.foo", "bar");
+    endConfig();
+    Path fileResource = new Path(CONFIG);
+    conf.addResource(fileResource);
+    conf.set("fs.defaultFS", "value");
+    assertEquals(
+        "Resource string returned for a file-loaded property" +
+        " must be a proper absolute path",
+        fileResource,
+        new Path(conf.getPropertySource("test.foo")));
+    assertEquals("Resource string returned for a set() property must be null",
+        null,
+        conf.getPropertySource("fs.defaultFS"));
+    assertEquals("Resource string returned for an unset property must be null",
+        null, conf.getPropertySource("fs.defaultFoo"));
+  }
+
   public void testSocketAddress() throws IOException {
     Configuration conf = new Configuration();
     final String defaultAddr = "host:1";

+ 13 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java

@@ -51,7 +51,19 @@ public class ViewFileSystemTestSetup {
     /**
      * create the test root on local_fs - the  mount table will point here
      */
-    fsTarget.mkdirs(FileSystemTestHelper.getTestRootPath(fsTarget));
+    Path targetOfTests = FileSystemTestHelper.getTestRootPath(fsTarget);
+    // In case previous test was killed before cleanup
+    fsTarget.delete(targetOfTests, true);
+    fsTarget.mkdirs(targetOfTests);
+
+    // Setup a link from viewfs to targetfs for the first component of
+    // path of testdir.
+    String testDir = FileSystemTestHelper.getTestRootPath(fsTarget).toUri()
+        .getPath();
+    int indexOf2ndSlash = testDir.indexOf('/', 1);
+    String testDirFirstComponent = testDir.substring(0, indexOf2ndSlash);
+    ConfigUtil.addLink(conf, testDirFirstComponent, fsTarget.makeQualified(
+        new Path(testDirFirstComponent)).toUri());
 
     // viewFs://home => fsTarget://home
     String homeDirRoot = fsTarget.getHomeDirectory()

+ 43 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java

@@ -21,9 +21,13 @@ package org.apache.hadoop.io;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -32,7 +36,8 @@ import org.mockito.Mockito;
  * Test cases for IOUtils.java
  */
 public class TestIOUtils {
-
+  private static final String TEST_FILE_NAME = "test_file";
+  
   @Test
   public void testCopyBytesShouldCloseStreamsWhenCloseIsTrue() throws Exception {
     InputStream inputStream = Mockito.mock(InputStream.class);
@@ -110,4 +115,41 @@ public class TestIOUtils {
     Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
   }
   
+  @Test
+  public void testWriteFully() throws IOException {
+    final int INPUT_BUFFER_LEN = 10000;
+    final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
+    byte[] input = new byte[INPUT_BUFFER_LEN];
+    for (int i = 0; i < input.length; i++) {
+      input[i] = (byte)(i & 0xff);
+    }
+    byte[] output = new byte[input.length];
+    
+    try {
+      RandomAccessFile raf = new RandomAccessFile(TEST_FILE_NAME, "rw");
+      FileChannel fc = raf.getChannel();
+      ByteBuffer buf = ByteBuffer.wrap(input);
+      IOUtils.writeFully(fc, buf);
+      raf.seek(0);
+      raf.read(output);
+      for (int i = 0; i < input.length; i++) {
+        assertEquals(input[i], output[i]);
+      }
+      buf.rewind();
+      IOUtils.writeFully(fc, buf, HALFWAY);
+      for (int i = 0; i < HALFWAY; i++) {
+        assertEquals(input[i], output[i]);
+      }
+      raf.seek(0);
+      raf.read(output);
+      for (int i = HALFWAY; i < input.length; i++) {
+        assertEquals(input[i - HALFWAY], output[i]);
+      }
+    } finally {
+      File f = new File(TEST_FILE_NAME);
+      if (f.exists()) {
+        f.delete();
+      }
+    }
+  }
 }
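
FileChannel.write() may return after a partial write, which is the reason a writeFully helper exists at all. A minimal sketch of the idea, not necessarily the exact implementation added to IOUtils:

  // FileChannel from java.nio.channels, ByteBuffer from java.nio
  static void writeFullySketch(FileChannel fc, ByteBuffer buf, long offset)
      throws IOException {
    // a positional write() may consume only part of the buffer, so loop
    // until the buffer is drained, advancing the file offset as we go
    while (buf.remaining() > 0) {
      offset += fc.write(buf, offset);
    }
  }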

+ 9 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java

@@ -137,38 +137,38 @@ public class TestText extends TestCase {
     }
   }
   
-  public void doTestLimitedIO(String str, int strLen) throws IOException {
+  public void doTestLimitedIO(String str, int len) throws IOException {
     DataOutputBuffer out = new DataOutputBuffer();
     DataInputBuffer in = new DataInputBuffer();
 
     out.reset();
     try {
-      Text.writeString(out, str, strLen);
+      Text.writeString(out, str, len);
       fail("expected writeString to fail when told to write a string " +
           "that was too long!  The string was '" + str + "'");
     } catch (IOException e) {
     }
-    Text.writeString(out, str, strLen + 1);
+    Text.writeString(out, str, len + 1);
 
     // test that it reads correctly
     in.reset(out.getData(), out.getLength());
-    in.mark(strLen);
+    in.mark(len);
     String after;
     try {
-      after = Text.readString(in, strLen);
+      after = Text.readString(in, len);
       fail("expected readString to fail when told to read a string " +
           "that was too long!  The string was '" + str + "'");
     } catch (IOException e) {
     }
     in.reset();
-    after = Text.readString(in, strLen + 1);
+    after = Text.readString(in, len + 1);
     assertTrue(str.equals(after));
   }
   
   public void testLimitedIO() throws Exception {
-    doTestLimitedIO("abcd", 4);
-    doTestLimitedIO("", 0);
-    doTestLimitedIO("1", 1);
+    doTestLimitedIO("abcd", 3);
+    doTestLimitedIO("foo bar baz", 10);
+    doTestLimitedIO("1", 0);
   }
 
   public void testCompare() throws Exception {
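
The tightened cases reflect the semantics of length-limited Text I/O: the limit is an inclusive upper bound on the encoded string length. A sketch, assuming the limit counts the string bytes (as the updated test suggests):

  DataOutputBuffer out = new DataOutputBuffer();
  try {
    Text.writeString(out, "abcd", 3); // throws: 4 encoded bytes > limit 3
  } catch (IOException expected) {
  }
  Text.writeString(out, "abcd", 4);   // ok: length == limit is allowed
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  Text.readString(in, 4);             // "abcd", same bound on the read side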

+ 18 - 12
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java

@@ -18,50 +18,55 @@
 
 package org.apache.hadoop.ipc;
 
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.Closeable;
 import java.io.IOException;
-import java.net.ConnectException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
+import java.net.ConnectException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.util.Arrays;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.*;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.UTF8;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
-import org.apache.hadoop.ipc.TestSaslRPC.TestSaslImpl;
-import org.apache.hadoop.ipc.TestSaslRPC.TestSaslProtocol;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.MockitoUtil;
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 import com.google.protobuf.DescriptorProtos;
 import com.google.protobuf.DescriptorProtos.EnumDescriptorProto;
 
-import static org.apache.hadoop.test.MetricsAsserts.*;
-
 /** Unit tests for RPC. */
 @SuppressWarnings("deprecation")
 public class TestRPC {
@@ -250,7 +255,8 @@ public class TestRPC {
     @Override
     public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
         InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
-        SocketFactory factory, int rpcTimeout) throws IOException {
+        SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy
+        ) throws IOException {
       T proxy = (T) Proxy.newProxyInstance(protocol.getClassLoader(),
               new Class[] { protocol }, new StoppedInvocationHandler());
       return new ProtocolProxy<T>(protocol, proxy, false);

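The signature change above threads a connection-level RetryPolicy into RpcEngine.getProxy. Independently of that plumbing, the retry helpers imported in this hunk can wrap any proxy at the call level; a sketch under assumed names (MyProtocol and rawProxy are illustrative stand-ins, while RetryProxy.create and RetryPolicies.retryUpToMaximumCountWithFixedSleep are the hadoop-common APIs imported above):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;

    public class RetrySketch {
      // Stand-in RPC interface; real callers would use their protocol class.
      interface MyProtocol {
        String ping() throws java.io.IOException;
      }

      static MyProtocol withRetries(MyProtocol rawProxy) {
        RetryPolicy policy = RetryPolicies
            .retryUpToMaximumCountWithFixedSleep(3, 1, TimeUnit.SECONDS);
        // Each call on the returned proxy is re-attempted per the policy
        // before the failure propagates to the caller.
        return (MyProtocol) RetryProxy.create(MyProtocol.class, rawProxy, policy);
      }
    }
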
+ 43 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.security.token.delegation;
 
 import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutput;
@@ -387,4 +388,46 @@ public class TestDelegationToken {
     }
   }
 
+  private boolean testDelegationTokenIdentiferSerializationRoundTrip(Text owner,
+      Text renewer, Text realUser) throws IOException {
+    TestDelegationTokenIdentifier dtid = new TestDelegationTokenIdentifier(
+        owner, renewer, realUser);
+    DataOutputBuffer out = new DataOutputBuffer();
+    dtid.writeImpl(out);
+    DataInputBuffer in = new DataInputBuffer();
+    in.reset(out.getData(), out.getLength());
+    try {
+      TestDelegationTokenIdentifier dtid2 =
+          new TestDelegationTokenIdentifier();
+      dtid2.readFields(in);
+      assertTrue(dtid.equals(dtid2));
+      return true;
+    } catch(IOException e){
+      return false;
+    }
+  }
+      
+  @Test
+  public void testSimpleDtidSerialization() throws IOException {
+    assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text("owner"), new Text("renewer"), new Text("realUser")));
+    assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text(""), new Text(""), new Text("")));
+    assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text(""), new Text("b"), new Text("")));
+  }
+  
+  @Test
+  public void testOverlongDtidSerialization() throws IOException {
+    byte[] bigBuf = new byte[Text.DEFAULT_MAX_LEN + 1];
+    for (int i = 0; i < bigBuf.length; i++) {
+      bigBuf[i] = 0;
+    }
+    assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text(bigBuf), new Text("renewer"), new Text("realUser")));
+    assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text("owner"), new Text(bigBuf), new Text("realUser")));
+    assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text("owner"), new Text("renewer"), new Text(bigBuf)));
+  }
 }

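The assertFalse cases above hinge on bounded deserialization: the identifier's readFields evidently rejects any field whose encoded form exceeds Text.DEFAULT_MAX_LEN, with the length-limited Text reads shown earlier in this patch as the underlying guard, so an over-long field surfaces as an IOException instead of a huge allocation. A compact sketch of that failure mode, using the same Text and buffer APIs as the test:

    import java.io.IOException;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.Text;

    public class BoundedReadSketch {
      public static void main(String[] args) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        // Serialize a field one byte past the default cap.
        new Text(new byte[Text.DEFAULT_MAX_LEN + 1]).write(out);
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        try {
          Text.readString(in, Text.DEFAULT_MAX_LEN);  // bounded read
        } catch (IOException expected) {
          System.out.println("over-long field rejected up front");
        }
      }
    }
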
+ 38 - 15
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java

@@ -15,15 +15,18 @@ package org.apache.hadoop.util;
 
 import com.google.common.base.Preconditions;
 
+import java.io.BufferedOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.net.URL;
 import java.net.URLDecoder;
 import java.text.MessageFormat;
 import java.util.Enumeration;
+import java.util.jar.JarFile;
 import java.util.jar.JarOutputStream;
 import java.util.jar.Manifest;
 import java.util.zip.ZipEntry;
@@ -37,10 +40,37 @@ import java.util.zip.ZipOutputStream;
  */
 public class JarFinder {
 
-  private static void zipDir(File dir, String relativePath, ZipOutputStream zos)
+  private static void copyToZipStream(InputStream is, ZipEntry entry,
+                              ZipOutputStream zos) throws IOException {
+    zos.putNextEntry(entry);
+    byte[] arr = new byte[4096];
+    int read = is.read(arr);
+    while (read > -1) {
+      zos.write(arr, 0, read);
+      read = is.read(arr);
+    }
+    is.close();
+    zos.closeEntry();
+  }
+
+  public static void jarDir(File dir, String relativePath, ZipOutputStream zos)
     throws IOException {
     Preconditions.checkNotNull(relativePath, "relativePath");
     Preconditions.checkNotNull(zos, "zos");
+
+    // by JAR spec, if there is a manifest, it must be the first entry in the
+    // ZIP.
+    File manifestFile = new File(dir, JarFile.MANIFEST_NAME);
+    ZipEntry manifestEntry = new ZipEntry(JarFile.MANIFEST_NAME);
+    if (!manifestFile.exists()) {
+      zos.putNextEntry(manifestEntry);
+      new Manifest().write(new BufferedOutputStream(zos));
+      zos.closeEntry();
+    } else {
+      InputStream is = new FileInputStream(manifestFile);
+      copyToZipStream(is, manifestEntry, zos);
+    }
+    zos.closeEntry();
     zipDir(dir, relativePath, zos, true);
     zos.close();
   }
@@ -62,17 +92,12 @@ public class JarFinder {
           zipDir(file, relativePath + f.getName() + "/", zos, false);
         }
         else {
-          ZipEntry anEntry = new ZipEntry(relativePath + f.getName());
-          zos.putNextEntry(anEntry);
-          InputStream is = new FileInputStream(f);
-          byte[] arr = new byte[4096];
-          int read = is.read(arr);
-          while (read > -1) {
-            zos.write(arr, 0, read);
-            read = is.read(arr);
+          String path = relativePath + f.getName();
+          if (!path.equals(JarFile.MANIFEST_NAME)) {
+            ZipEntry anEntry = new ZipEntry(path);
+            InputStream is = new FileInputStream(f);
+            copyToZipStream(is, anEntry, zos);
           }
-          is.close();
-          zos.closeEntry();
         }
       }
     }
@@ -88,9 +113,8 @@ public class JarFinder {
                                                    jarDir));
       }
     }
-    JarOutputStream zos = new JarOutputStream(new FileOutputStream(jarFile),
-                                              new Manifest());
-    zipDir(dir, "", zos);
+    JarOutputStream zos = new JarOutputStream(new FileOutputStream(jarFile));
+    jarDir(dir, "", zos);
   }
 
   /**
@@ -142,5 +166,4 @@ public class JarFinder {
     }
     return null;
   }
-
 }

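The refactor above matters because a manifest is only picked up when it sits at the head of the archive, as the JAR spec requires; jarDir now guarantees that ordering whether or not META-INF/MANIFEST.MF exists on disk. The same guarantee is available from the JDK alone when no on-disk manifest must be preserved; a small self-contained sketch:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.util.jar.JarInputStream;
    import java.util.jar.JarOutputStream;
    import java.util.jar.Manifest;
    import java.util.zip.ZipEntry;

    public class ManifestFirstSketch {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        // Passing a Manifest to the constructor makes JarOutputStream emit
        // META-INF/MANIFEST.MF as the very first entry, per the JAR spec.
        JarOutputStream jos = new JarOutputStream(baos, new Manifest());
        jos.putNextEntry(new ZipEntry("props.properties"));
        jos.closeEntry();
        jos.close();
        JarInputStream jis =
            new JarInputStream(new ByteArrayInputStream(baos.toByteArray()));
        // Non-null only because the manifest leads the stream.
        System.out.println(jis.getManifest() != null);
        jis.close();
      }
    }
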
+ 86 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java

@@ -22,21 +22,105 @@ import org.apache.commons.logging.LogFactory;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.Writer;
+import java.text.MessageFormat;
+import java.util.Properties;
+import java.util.jar.JarInputStream;
+import java.util.jar.JarOutputStream;
+import java.util.jar.Manifest;
 
 public class TestJarFinder {
 
   @Test
-  public void testAppend() throws Exception {
+  public void testJar() throws Exception {
 
     //picking a class that is for sure in a JAR in the classpath
     String jar = JarFinder.getJar(LogFactory.class);
     Assert.assertTrue(new File(jar).exists());
+  }
+
+  private static void delete(File file) throws IOException {
+    if (file.getAbsolutePath().length() < 5) {
+      throw new IllegalArgumentException(
+        MessageFormat.format("Path [{0}] is too short, not deleting",
+                             file.getAbsolutePath()));
+    }
+    if (file.exists()) {
+      if (file.isDirectory()) {
+        File[] children = file.listFiles();
+        if (children != null) {
+          for (File child : children) {
+            delete(child);
+          }
+        }
+      }
+      if (!file.delete()) {
+        throw new RuntimeException(
+          MessageFormat.format("Could not delete path [{0}]",
+                               file.getAbsolutePath()));
+      }
+    }
+  }
 
+  @Test
+  public void testExpandedClasspath() throws Exception {
     //picking a class that is for sure in a directory in the classpath
     //in this case the JAR is created on the fly
-    jar = JarFinder.getJar(TestJarFinder.class);
+    String jar = JarFinder.getJar(TestJarFinder.class);
     Assert.assertTrue(new File(jar).exists());
   }
 
+  @Test
+  public void testExistingManifest() throws Exception {
+    File dir = new File(System.getProperty("test.build.dir", "target/test-dir"),
+                        TestJarFinder.class.getName() + "-testExistingManifest");
+    delete(dir);
+    dir.mkdirs();
+
+    File metaInfDir = new File(dir, "META-INF");
+    metaInfDir.mkdirs();
+    File manifestFile = new File(metaInfDir, "MANIFEST.MF");
+    Manifest manifest = new Manifest();
+    OutputStream os = new FileOutputStream(manifestFile);
+    manifest.write(os);
+    os.close();
+
+    File propsFile = new File(dir, "props.properties");
+    Writer writer = new FileWriter(propsFile);
+    new Properties().store(writer, "");
+    writer.close();
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    JarOutputStream zos = new JarOutputStream(baos);
+    JarFinder.jarDir(dir, "", zos);
+    JarInputStream jis =
+      new JarInputStream(new ByteArrayInputStream(baos.toByteArray()));
+    Assert.assertNotNull(jis.getManifest());
+    jis.close();
+  }
+
+  @Test
+  public void testNoManifest() throws Exception {
+    File dir = new File(System.getProperty("test.build.dir", "target/test-dir"),
+                        TestJarFinder.class.getName() + "-testNoManifest");
+    delete(dir);
+    dir.mkdirs();
+    File propsFile = new File(dir, "props.properties");
+    Writer writer = new FileWriter(propsFile);
+    new Properties().store(writer, "");
+    writer.close();
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    JarOutputStream zos = new JarOutputStream(baos);
+    JarFinder.jarDir(dir, "", zos);
+    JarInputStream jis =
+      new JarInputStream(new ByteArrayInputStream(baos.toByteArray()));
+    Assert.assertNotNull(jis.getManifest());
+    jis.close();
+  }
 }

+ 6 - 0
hadoop-dist/pom.xml

@@ -52,6 +52,11 @@
       <artifactId>hadoop-yarn-api</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-raid</artifactId>
+      <scope>provided</scope>
+    </dependency>
   </dependencies>
 
   <build>
@@ -120,6 +125,7 @@
                       run cp -r $ROOT/hadoop-common-project/hadoop-common/target/hadoop-common-${project.version}/* .
                       run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version}/* .
                       run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${project.version}/* .
+                      run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-raid/target/hadoop-hdfs-raid-${project.version}/* .
                       run cp -r $ROOT/hadoop-mapreduce-project/target/hadoop-mapreduce-${project.version}/* .
                       run cp -r $ROOT/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${project.version}/* .
                       echo

+ 82 - 67
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java

@@ -154,41 +154,33 @@ public class HttpFSFileSystem extends FileSystem {
 
   public static final int HTTP_TEMPORARY_REDIRECT = 307;
 
+  private static final String HTTP_GET = "GET";
+  private static final String HTTP_PUT = "PUT";
+  private static final String HTTP_POST = "POST";
+  private static final String HTTP_DELETE = "DELETE";
 
-  /**
-   * Get operations.
-   */
-  public enum GetOpValues {
-    OPEN, GETFILESTATUS, LISTSTATUS, GETHOMEDIRECTORY, GETCONTENTSUMMARY, GETFILECHECKSUM,
-    GETDELEGATIONTOKEN, GETFILEBLOCKLOCATIONS, INSTRUMENTATION
-  }
+  public enum Operation {
+    OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
+    GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
+    GETFILECHECKSUM(HTTP_GET),  GETFILEBLOCKLOCATIONS(HTTP_GET),
+    INSTRUMENTATION(HTTP_GET),
+    APPEND(HTTP_POST),
+    CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
+    SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
+    DELETE(HTTP_DELETE);
 
-  /**
-   * Post operations.
-   */
-  public static enum PostOpValues {
-    APPEND
-  }
+    private String httpMethod;
 
-  /**
-   * Put operations.
-   */
-  public static enum PutOpValues {
-    CREATE, MKDIRS, RENAME, SETOWNER, SETPERMISSION, SETREPLICATION, SETTIMES,
-    RENEWDELEGATIONTOKEN, CANCELDELEGATIONTOKEN
-  }
+    Operation(String httpMethod) {
+      this.httpMethod = httpMethod;
+    }
+
+    public String getMethod() {
+      return httpMethod;
+    }
 
-  /**
-   * Delete operations.
-   */
-  public static enum DeleteOpValues {
-    DELETE
   }
 
-  private static final String HTTP_GET = "GET";
-  private static final String HTTP_PUT = "PUT";
-  private static final String HTTP_POST = "POST";
-  private static final String HTTP_DELETE = "DELETE";
 
   private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
   private URI uri;
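
The consolidation above replaces four per-verb enums with a single Operation enum that carries its own HTTP method, so a call site can no longer pair an operation with the wrong verb. A generic sketch of the pattern (names illustrative, not part of HttpFS):

    enum Op {
      OPEN("GET"), CREATE("PUT"), APPEND("POST"), DELETE("DELETE");

      private final String httpMethod;

      Op(String httpMethod) { this.httpMethod = httpMethod; }

      String getMethod() { return httpMethod; }
    }

    // A call site asks the operation for its verb instead of hard-coding it:
    //   HttpURLConnection conn =
    //       getConnection(Op.CREATE.getMethod(), params, path, true);
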
@@ -402,10 +394,12 @@ public class HttpFSFileSystem extends FileSystem {
   @Override
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, GetOpValues.OPEN.toString());
-    HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+    params.put(OP_PARAM, Operation.OPEN.toString());
+    HttpURLConnection conn = getConnection(Operation.OPEN.getMethod(), params,
+                                           f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
-    return new FSDataInputStream(new HttpFSDataInputStream(conn.getInputStream(), bufferSize));
+    return new FSDataInputStream(
+      new HttpFSDataInputStream(conn.getInputStream(), bufferSize));
   }
 
   /**
@@ -508,15 +502,18 @@ public class HttpFSFileSystem extends FileSystem {
    * @see #setPermission(Path, FsPermission)
    */
   @Override
-  public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize,
-                                   short replication, long blockSize, Progressable progress) throws IOException {
+  public FSDataOutputStream create(Path f, FsPermission permission,
+                                   boolean overwrite, int bufferSize,
+                                   short replication, long blockSize,
+                                   Progressable progress) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.CREATE.toString());
+    params.put(OP_PARAM, Operation.CREATE.toString());
     params.put(OVERWRITE_PARAM, Boolean.toString(overwrite));
     params.put(REPLICATION_PARAM, Short.toString(replication));
     params.put(BLOCKSIZE_PARAM, Long.toString(blockSize));
     params.put(PERMISSION_PARAM, permissionToString(permission));
-    return uploadData(HTTP_PUT, f, params, bufferSize, HttpURLConnection.HTTP_CREATED);
+    return uploadData(Operation.CREATE.getMethod(), f, params, bufferSize,
+                      HttpURLConnection.HTTP_CREATED);
   }
 
 
@@ -532,10 +529,12 @@ public class HttpFSFileSystem extends FileSystem {
    * @throws IOException
    */
   @Override
-  public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
+  public FSDataOutputStream append(Path f, int bufferSize,
+                                   Progressable progress) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PostOpValues.APPEND.toString());
-    return uploadData(HTTP_POST, f, params, bufferSize, HttpURLConnection.HTTP_OK);
+    params.put(OP_PARAM, Operation.APPEND.toString());
+    return uploadData(Operation.APPEND.getMethod(), f, params, bufferSize,
+                      HttpURLConnection.HTTP_OK);
   }
 
   /**
@@ -545,9 +544,10 @@ public class HttpFSFileSystem extends FileSystem {
   @Override
   public boolean rename(Path src, Path dst) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.RENAME.toString());
+    params.put(OP_PARAM, Operation.RENAME.toString());
     params.put(DESTINATION_PARAM, dst.toString());
-    HttpURLConnection conn = getConnection(HTTP_PUT, params, src, true);
+    HttpURLConnection conn = getConnection(Operation.RENAME.getMethod(),
+                                           params, src, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) jsonParse(conn);
     return (Boolean) json.get(RENAME_JSON);
@@ -580,9 +580,10 @@ public class HttpFSFileSystem extends FileSystem {
   @Override
   public boolean delete(Path f, boolean recursive) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, DeleteOpValues.DELETE.toString());
+    params.put(OP_PARAM, Operation.DELETE.toString());
     params.put(RECURSIVE_PARAM, Boolean.toString(recursive));
-    HttpURLConnection conn = getConnection(HTTP_DELETE, params, f, true);
+    HttpURLConnection conn = getConnection(Operation.DELETE.getMethod(),
+                                           params, f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) jsonParse(conn);
     return (Boolean) json.get(DELETE_JSON);
@@ -601,8 +602,9 @@ public class HttpFSFileSystem extends FileSystem {
   @Override
   public FileStatus[] listStatus(Path f) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, GetOpValues.LISTSTATUS.toString());
-    HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+    params.put(OP_PARAM, Operation.LISTSTATUS.toString());
+    HttpURLConnection conn = getConnection(Operation.LISTSTATUS.getMethod(),
+                                           params, f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) jsonParse(conn);
     json = (JSONObject) json.get(FILE_STATUSES_JSON);
@@ -647,9 +649,10 @@ public class HttpFSFileSystem extends FileSystem {
   @Override
   public boolean mkdirs(Path f, FsPermission permission) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.MKDIRS.toString());
+    params.put(OP_PARAM, Operation.MKDIRS.toString());
     params.put(PERMISSION_PARAM, permissionToString(permission));
-    HttpURLConnection conn = getConnection(HTTP_PUT, params, f, true);
+    HttpURLConnection conn = getConnection(Operation.MKDIRS.getMethod(),
+                                           params, f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) jsonParse(conn);
     return (Boolean) json.get(MKDIRS_JSON);
@@ -668,8 +671,9 @@ public class HttpFSFileSystem extends FileSystem {
   @Override
   public FileStatus getFileStatus(Path f) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, GetOpValues.GETFILESTATUS.toString());
-    HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+    params.put(OP_PARAM, Operation.GETFILESTATUS.toString());
+    HttpURLConnection conn = getConnection(Operation.GETFILESTATUS.getMethod(),
+                                           params, f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) jsonParse(conn);
     json = (JSONObject) json.get(FILE_STATUS_JSON);
@@ -684,9 +688,11 @@ public class HttpFSFileSystem extends FileSystem {
   @Override
   public Path getHomeDirectory() {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, GetOpValues.GETHOMEDIRECTORY.toString());
+    params.put(OP_PARAM, Operation.GETHOMEDIRECTORY.toString());
     try {
-      HttpURLConnection conn = getConnection(HTTP_GET, params, new Path(getUri().toString(), "/"), false);
+      HttpURLConnection conn =
+        getConnection(Operation.GETHOMEDIRECTORY.getMethod(), params,
+                      new Path(getUri().toString(), "/"), false);
       validateResponse(conn, HttpURLConnection.HTTP_OK);
       JSONObject json = (JSONObject) jsonParse(conn);
       return new Path((String) json.get(HOME_DIR_JSON));
@@ -704,12 +710,14 @@ public class HttpFSFileSystem extends FileSystem {
    * @param groupname If it is null, the original groupname remains unchanged.
    */
   @Override
-  public void setOwner(Path p, String username, String groupname) throws IOException {
+  public void setOwner(Path p, String username, String groupname)
+    throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.SETOWNER.toString());
+    params.put(OP_PARAM, Operation.SETOWNER.toString());
     params.put(OWNER_PARAM, username);
     params.put(GROUP_PARAM, groupname);
-    HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
+    HttpURLConnection conn = getConnection(Operation.SETOWNER.getMethod(),
+                                           params, p, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
   }
 
@@ -722,9 +730,9 @@ public class HttpFSFileSystem extends FileSystem {
   @Override
   public void setPermission(Path p, FsPermission permission) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.SETPERMISSION.toString());
+    params.put(OP_PARAM, Operation.SETPERMISSION.toString());
     params.put(PERMISSION_PARAM, permissionToString(permission));
-    HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
+    HttpURLConnection conn = getConnection(Operation.SETPERMISSION.getMethod(), params, p, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
   }
 
@@ -742,10 +750,11 @@ public class HttpFSFileSystem extends FileSystem {
   @Override
   public void setTimes(Path p, long mtime, long atime) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.SETTIMES.toString());
+    params.put(OP_PARAM, Operation.SETTIMES.toString());
     params.put(MODIFICATION_TIME_PARAM, Long.toString(mtime));
     params.put(ACCESS_TIME_PARAM, Long.toString(atime));
-    HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
+    HttpURLConnection conn = getConnection(Operation.SETTIMES.getMethod(),
+                                           params, p, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
   }
 
@@ -761,11 +770,13 @@ public class HttpFSFileSystem extends FileSystem {
    * @throws IOException
    */
   @Override
-  public boolean setReplication(Path src, short replication) throws IOException {
+  public boolean setReplication(Path src, short replication)
+    throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.SETREPLICATION.toString());
+    params.put(OP_PARAM, Operation.SETREPLICATION.toString());
     params.put(REPLICATION_PARAM, Short.toString(replication));
-    HttpURLConnection conn = getConnection(HTTP_PUT, params, src, true);
+    HttpURLConnection conn =
+      getConnection(Operation.SETREPLICATION.getMethod(), params, src, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) jsonParse(conn);
     return (Boolean) json.get(SET_REPLICATION_JSON);
@@ -814,10 +825,12 @@ public class HttpFSFileSystem extends FileSystem {
   @Override
   public ContentSummary getContentSummary(Path f) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, GetOpValues.GETCONTENTSUMMARY.toString());
-    HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+    params.put(OP_PARAM, Operation.GETCONTENTSUMMARY.toString());
+    HttpURLConnection conn =
+      getConnection(Operation.GETCONTENTSUMMARY.getMethod(), params, f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
-    JSONObject json = (JSONObject) ((JSONObject) jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
+    JSONObject json =
+      (JSONObject) ((JSONObject) jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
     return new ContentSummary((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON),
                               (Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON),
                               (Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON),
@@ -830,10 +843,12 @@ public class HttpFSFileSystem extends FileSystem {
   @Override
   public FileChecksum getFileChecksum(Path f) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, GetOpValues.GETFILECHECKSUM.toString());
-    HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+    params.put(OP_PARAM, Operation.GETFILECHECKSUM.toString());
+    HttpURLConnection conn =
+      getConnection(Operation.GETFILECHECKSUM.getMethod(), params, f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
-    final JSONObject json = (JSONObject) ((JSONObject) jsonParse(conn)).get(FILE_CHECKSUM_JSON);
+    final JSONObject json =
+      (JSONObject) ((JSONObject) jsonParse(conn)).get(FILE_CHECKSUM_JSON);
     return new FileChecksum() {
       @Override
       public String getAlgorithmName() {

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java

@@ -30,7 +30,6 @@ import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import java.io.IOException;
-import java.net.InetAddress;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -43,8 +42,8 @@ public class CheckUploadContentTypeFilter implements Filter {
   private static final Set<String> UPLOAD_OPERATIONS = new HashSet<String>();
 
   static {
-    UPLOAD_OPERATIONS.add(HttpFSFileSystem.PostOpValues.APPEND.toString());
-    UPLOAD_OPERATIONS.add(HttpFSFileSystem.PutOpValues.CREATE.toString());
+    UPLOAD_OPERATIONS.add(HttpFSFileSystem.Operation.APPEND.toString());
+    UPLOAD_OPERATIONS.add(HttpFSFileSystem.Operation.CREATE.toString());
   }
 
   /**
@@ -82,7 +81,7 @@ public class CheckUploadContentTypeFilter implements Filter {
     if (method.equals("PUT") || method.equals("POST")) {
       String op = httpReq.getParameter(HttpFSFileSystem.OP_PARAM);
       if (op != null && UPLOAD_OPERATIONS.contains(op.toUpperCase())) {
-        if ("true".equalsIgnoreCase(httpReq.getParameter(HttpFSParams.DataParam.NAME))) {
+        if ("true".equalsIgnoreCase(httpReq.getParameter(HttpFSParametersProvider.DataParam.NAME))) {
           String contentType = httpReq.getContentType();
           contentTypeOK =
             HttpFSFileSystem.UPLOAD_CONTENT_TYPE.equalsIgnoreCase(contentType);

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.fs.http.server;
 
+import com.sun.jersey.api.container.ContainerException;
 import org.apache.hadoop.lib.service.FileSystemAccessException;
 import org.apache.hadoop.lib.wsrs.ExceptionProvider;
 import org.slf4j.Logger;
@@ -59,6 +60,9 @@ public class HttpFSExceptionProvider extends ExceptionProvider {
     if (throwable instanceof FileSystemAccessException) {
       throwable = throwable.getCause();
     }
+    if (throwable instanceof ContainerException) {
+      throwable = throwable.getCause();
+    }
     if (throwable instanceof SecurityException) {
       status = Response.Status.UNAUTHORIZED;
     } else if (throwable instanceof FileNotFoundException) {
@@ -67,6 +71,8 @@ public class HttpFSExceptionProvider extends ExceptionProvider {
       status = Response.Status.INTERNAL_SERVER_ERROR;
     } else if (throwable instanceof UnsupportedOperationException) {
       status = Response.Status.BAD_REQUEST;
+    } else if (throwable instanceof IllegalArgumentException) {
+      status = Response.Status.BAD_REQUEST;
     } else {
       status = Response.Status.INTERNAL_SERVER_ERROR;
     }

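Two details of this change work together: Jersey wraps failures raised inside providers (including the new ParametersProvider introduced below) in a ContainerException, which must be unwrapped before the real cause can be mapped, and the provider signals malformed parameter values with IllegalArgumentException, which should surface as 400 rather than 500. A condensed, hypothetical standalone version of the unwrap-then-map logic (statusFor is illustrative, not the HttpFS class):

    import java.io.FileNotFoundException;
    import javax.ws.rs.core.Response;
    import com.sun.jersey.api.container.ContainerException;

    public class StatusMappingSketch {
      static Response.Status statusFor(Throwable t) {
        if (t instanceof ContainerException && t.getCause() != null) {
          t = t.getCause();                      // unwrap Jersey's wrapper first
        }
        if (t instanceof FileNotFoundException)    return Response.Status.NOT_FOUND;
        if (t instanceof IllegalArgumentException) return Response.Status.BAD_REQUEST;
        if (t instanceof SecurityException)        return Response.Status.UNAUTHORIZED;
        return Response.Status.INTERNAL_SERVER_ERROR;
      }
    }
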
+ 398 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java

@@ -0,0 +1,398 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem.Operation;
+import org.apache.hadoop.lib.wsrs.BooleanParam;
+import org.apache.hadoop.lib.wsrs.EnumParam;
+import org.apache.hadoop.lib.wsrs.LongParam;
+import org.apache.hadoop.lib.wsrs.Param;
+import org.apache.hadoop.lib.wsrs.ParametersProvider;
+import org.apache.hadoop.lib.wsrs.ShortParam;
+import org.apache.hadoop.lib.wsrs.StringParam;
+import org.apache.hadoop.lib.wsrs.UserProvider;
+import org.slf4j.MDC;
+
+import javax.ws.rs.ext.Provider;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+/**
+ * HttpFS ParametersProvider.
+ */
+@Provider
+public class HttpFSParametersProvider extends ParametersProvider {
+
+  private static final Map<Enum, Class<Param<?>>[]> PARAMS_DEF =
+    new HashMap<Enum, Class<Param<?>>[]>();
+
+  static {
+    PARAMS_DEF.put(Operation.OPEN,
+      new Class[]{DoAsParam.class, OffsetParam.class, LenParam.class});
+    PARAMS_DEF.put(Operation.GETFILESTATUS, new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.LISTSTATUS,
+      new Class[]{DoAsParam.class, FilterParam.class});
+    PARAMS_DEF.put(Operation.GETHOMEDIRECTORY, new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.GETCONTENTSUMMARY, new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.GETFILECHECKSUM, new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS,
+      new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.APPEND,
+      new Class[]{DoAsParam.class, DataParam.class});
+    PARAMS_DEF.put(Operation.CREATE,
+      new Class[]{DoAsParam.class, PermissionParam.class, OverwriteParam.class,
+                  ReplicationParam.class, BlockSizeParam.class, DataParam.class});
+    PARAMS_DEF.put(Operation.MKDIRS,
+      new Class[]{DoAsParam.class, PermissionParam.class});
+    PARAMS_DEF.put(Operation.RENAME,
+      new Class[]{DoAsParam.class, DestinationParam.class});
+    PARAMS_DEF.put(Operation.SETOWNER,
+      new Class[]{DoAsParam.class, OwnerParam.class, GroupParam.class});
+    PARAMS_DEF.put(Operation.SETPERMISSION,
+      new Class[]{DoAsParam.class, PermissionParam.class});
+    PARAMS_DEF.put(Operation.SETREPLICATION,
+      new Class[]{DoAsParam.class, ReplicationParam.class});
+    PARAMS_DEF.put(Operation.SETTIMES,
+      new Class[]{DoAsParam.class, ModifiedTimeParam.class,
+                  AccessTimeParam.class});
+    PARAMS_DEF.put(Operation.DELETE,
+      new Class[]{DoAsParam.class, RecursiveParam.class});
+  }
+
+  public HttpFSParametersProvider() {
+    super(HttpFSFileSystem.OP_PARAM, HttpFSFileSystem.Operation.class,
+          PARAMS_DEF);
+  }
+
+  /**
+   * Class for access-time parameter.
+   */
+  public static class AccessTimeParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.ACCESS_TIME_PARAM;
+    /**
+     * Constructor.
+     */
+    public AccessTimeParam() {
+      super(NAME, -1l);
+    }
+  }
+
+  /**
+   * Class for block-size parameter.
+   */
+  public static class BlockSizeParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.BLOCKSIZE_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public BlockSizeParam() {
+      super(NAME, -1l);
+    }
+  }
+
+  /**
+   * Class for data parameter.
+   */
+  public static class DataParam extends BooleanParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = "data";
+
+    /**
+     * Constructor.
+     */
+    public DataParam() {
+      super(NAME, false);
+    }
+  }
+
+  /**
+   * Class for operation parameter.
+   */
+  public static class OperationParam extends EnumParam<HttpFSFileSystem.Operation> {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.OP_PARAM;
+    /**
+     * Constructor.
+     */
+    public OperationParam(String operation) {
+      super(NAME, HttpFSFileSystem.Operation.class,
+            HttpFSFileSystem.Operation.valueOf(operation.toUpperCase()));
+    }
+  }
+
+  /**
+   * Class for delete's recursive parameter.
+   */
+  public static class RecursiveParam extends BooleanParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.RECURSIVE_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public RecursiveParam() {
+      super(NAME, false);
+    }
+  }
+
+  /**
+   * Class for do-as parameter.
+   */
+  public static class DoAsParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.DO_AS_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public DoAsParam() {
+      super(NAME, null, UserProvider.USER_PATTERN);
+    }
+
+    /**
+     * Delegates to parent and then adds do-as user to
+     * MDC context for logging purposes.
+     *
+     *
+     * @param str parameter value.
+     *
+     * @return parsed parameter
+     */
+    @Override
+    public String parseParam(String str) {
+      String doAs = super.parseParam(str);
+      MDC.put(getName(), (doAs != null) ? doAs : "-");
+      return doAs;
+    }
+  }
+
+  /**
+   * Class for filter parameter.
+   */
+  public static class FilterParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = "filter";
+
+    /**
+     * Constructor.
+     */
+    public FilterParam() {
+      super(NAME, null);
+    }
+
+  }
+
+  /**
+   * Class for group parameter.
+   */
+  public static class GroupParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.GROUP_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public GroupParam() {
+      super(NAME, null, UserProvider.USER_PATTERN);
+    }
+
+  }
+
+  /**
+   * Class for len parameter.
+   */
+  public static class LenParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = "len";
+
+    /**
+     * Constructor.
+     */
+    public LenParam() {
+      super(NAME, -1l);
+    }
+  }
+
+  /**
+   * Class for modified-time parameter.
+   */
+  public static class ModifiedTimeParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.MODIFICATION_TIME_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public ModifiedTimeParam() {
+      super(NAME, -1l);
+    }
+  }
+
+  /**
+   * Class for offset parameter.
+   */
+  public static class OffsetParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = "offset";
+
+    /**
+     * Constructor.
+     */
+    public OffsetParam() {
+      super(NAME, 0l);
+    }
+  }
+
+  /**
+   * Class for overwrite parameter.
+   */
+  public static class OverwriteParam extends BooleanParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.OVERWRITE_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public OverwriteParam() {
+      super(NAME, true);
+    }
+  }
+
+  /**
+   * Class for owner parameter.
+   */
+  public static class OwnerParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.OWNER_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public OwnerParam() {
+      super(NAME, null, UserProvider.USER_PATTERN);
+    }
+
+  }
+
+  /**
+   * Class for permission parameter.
+   */
+  public static class PermissionParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.PERMISSION_PARAM;
+
+    /**
+     * Symbolic Unix permissions regular expression pattern.
+     */
+    private static final Pattern PERMISSION_PATTERN =
+      Pattern.compile(HttpFSFileSystem.DEFAULT_PERMISSION +
+                      "|[0-1]?[0-7][0-7][0-7]");
+
+    /**
+     * Constructor.
+     */
+    public PermissionParam() {
+      super(NAME, HttpFSFileSystem.DEFAULT_PERMISSION, PERMISSION_PATTERN);
+    }
+
+  }
+
+  /**
+   * Class for replication parameter.
+   */
+  public static class ReplicationParam extends ShortParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.REPLICATION_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public ReplicationParam() {
+      super(NAME, (short) -1);
+    }
+  }
+
+  /**
+   * Class for to-path parameter.
+   */
+  public static class DestinationParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.DESTINATION_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public DestinationParam() {
+      super(NAME, null);
+    }
+  }
+}

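The provider above is data-driven: PARAMS_DEF declares, per Operation, exactly which typed parameters a request may carry, and each Param subclass bundles its name, default value, and optional validation pattern. Extending the scheme is one class plus one registration; a sketch with a hypothetical parameter (BufferSizeParam and its wiring are illustrative only, while LongParam and PARAMS_DEF are the pieces defined above):

    // Hypothetical new parameter under the same scheme, which would live
    // inside HttpFSParametersProvider alongside the classes above.
    public static class BufferSizeParam extends LongParam {
      public static final String NAME = "buffersize";
      public BufferSizeParam() {
        super(NAME, 4096L);   // name + default; parsing validates the type
      }
    }

    // Registered against the operations that accept it, e.g.:
    //   PARAMS_DEF.put(Operation.OPEN,
    //     new Class[]{DoAsParam.class, OffsetParam.class, LenParam.class,
    //                 BufferSizeParam.class});
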
+ 0 - 551
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java

@@ -1,551 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.lib.wsrs.BooleanParam;
-import org.apache.hadoop.lib.wsrs.EnumParam;
-import org.apache.hadoop.lib.wsrs.LongParam;
-import org.apache.hadoop.lib.wsrs.ShortParam;
-import org.apache.hadoop.lib.wsrs.StringParam;
-import org.apache.hadoop.lib.wsrs.UserProvider;
-import org.slf4j.MDC;
-
-import java.util.regex.Pattern;
-
-/**
- * HttpFS HTTP Parameters used by {@link HttpFSServer}.
- */
-public class HttpFSParams {
-
-  /**
-   * To avoid instantiation.
-   */
-  private HttpFSParams() {
-  }
-
-  /**
-   * Class for access-time parameter.
-   */
-  public static class AccessTimeParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.ACCESS_TIME_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "-1";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public AccessTimeParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for block-size parameter.
-   */
-  public static class BlockSizeParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.BLOCKSIZE_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "-1";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public BlockSizeParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for data parameter.
-   */
-  public static class DataParam extends BooleanParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = "data";
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "false";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public DataParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for DELETE operation parameter.
-   */
-  public static class DeleteOpParam extends EnumParam<HttpFSFileSystem.DeleteOpValues> {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.OP_PARAM;
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public DeleteOpParam(String str) {
-      super(NAME, str, HttpFSFileSystem.DeleteOpValues.class);
-    }
-  }
-
-  /**
-   * Class for delete's recursive parameter.
-   */
-  public static class DeleteRecursiveParam extends BooleanParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.RECURSIVE_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "false";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public DeleteRecursiveParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for do-as parameter.
-   */
-  public static class DoAsParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.DO_AS_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public DoAsParam(String str) {
-      super(NAME, str, UserProvider.USER_PATTERN);
-    }
-
-    /**
-     * Delegates to parent and then adds do-as user to
-     * MDC context for logging purposes.
-     *
-     * @param name parameter name.
-     * @param str parameter value.
-     *
-     * @return parsed parameter
-     */
-    @Override
-    public String parseParam(String name, String str) {
-      String doAs = super.parseParam(name, str);
-      MDC.put(NAME, (doAs != null) ? doAs : "-");
-      return doAs;
-    }
-  }
-
-  /**
-   * Class for filter parameter.
-   */
-  public static class FilterParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = "filter";
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "";
-
-    /**
-     * Constructor.
-     *
-     * @param expr parameter value.
-     */
-    public FilterParam(String expr) {
-      super(NAME, expr);
-    }
-
-  }
-
-  /**
-   * Class for path parameter.
-   */
-  public static class FsPathParam extends StringParam {
-
-    /**
-     * Constructor.
-     *
-     * @param path parameter value.
-     */
-    public FsPathParam(String path) {
-      super("path", path);
-    }
-
-    /**
-     * Makes the path absolute adding '/' to it.
-     * <p/>
-     * This is required because JAX-RS resolution of paths does not add
-     * the root '/'.
-     */
-    public void makeAbsolute() {
-      String path = value();
-      path = "/" + ((path != null) ? path : "");
-      setValue(path);
-    }
-
-  }
-
-  /**
-   * Class for GET operation parameter.
-   */
-  public static class GetOpParam extends EnumParam<HttpFSFileSystem.GetOpValues> {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.OP_PARAM;
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public GetOpParam(String str) {
-      super(NAME, str, HttpFSFileSystem.GetOpValues.class);
-    }
-  }
-
-  /**
-   * Class for group parameter.
-   */
-  public static class GroupParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.GROUP_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public GroupParam(String str) {
-      super(NAME, str, UserProvider.USER_PATTERN);
-    }
-
-  }
-
-  /**
-   * Class for len parameter.
-   */
-  public static class LenParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = "len";
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "-1";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public LenParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for modified-time parameter.
-   */
-  public static class ModifiedTimeParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.MODIFICATION_TIME_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "-1";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public ModifiedTimeParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for offset parameter.
-   */
-  public static class OffsetParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = "offset";
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "0";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public OffsetParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for overwrite parameter.
-   */
-  public static class OverwriteParam extends BooleanParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.OVERWRITE_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "true";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public OverwriteParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for owner parameter.
-   */
-  public static class OwnerParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.OWNER_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public OwnerParam(String str) {
-      super(NAME, str, UserProvider.USER_PATTERN);
-    }
-
-  }
-
-  /**
-   * Class for permission parameter.
-   */
-  public static class PermissionParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.PERMISSION_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = HttpFSFileSystem.DEFAULT_PERMISSION;
-
-
-    /**
-     * Symbolic Unix permissions regular expression pattern.
-     */
-    private static final Pattern PERMISSION_PATTERN =
-      Pattern.compile(DEFAULT + "|[0-1]?[0-7][0-7][0-7]");
-
-    /**
-     * Constructor.
-     *
-     * @param permission parameter value.
-     */
-    public PermissionParam(String permission) {
-      super(NAME, permission.toLowerCase(), PERMISSION_PATTERN);
-    }
-
-  }
-
-  /**
-   * Class for POST operation parameter.
-   */
-  public static class PostOpParam extends EnumParam<HttpFSFileSystem.PostOpValues> {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.OP_PARAM;
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public PostOpParam(String str) {
-      super(NAME, str, HttpFSFileSystem.PostOpValues.class);
-    }
-  }
-
-  /**
-   * Class for PUT operation parameter.
-   */
-  public static class PutOpParam extends EnumParam<HttpFSFileSystem.PutOpValues> {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.OP_PARAM;
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public PutOpParam(String str) {
-      super(NAME, str, HttpFSFileSystem.PutOpValues.class);
-    }
-  }
-
-  /**
-   * Class for replication parameter.
-   */
-  public static class ReplicationParam extends ShortParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.REPLICATION_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "-1";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public ReplicationParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for to-path parameter.
-   */
-  public static class ToPathParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.DESTINATION_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "";
-
-    /**
-     * Constructor.
-     *
-     * @param path parameter value.
-     */
-    public ToPathParam(String path) {
-      super(NAME, path);
-    }
-  }
-}

+ 325 - 359
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java

@@ -21,26 +21,22 @@ package org.apache.hadoop.fs.http.server;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.fs.http.server.HttpFSParams.AccessTimeParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.BlockSizeParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DataParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteRecursiveParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DoAsParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.FilterParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.FsPathParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.GetOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.GroupParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.LenParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.ModifiedTimeParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.OffsetParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.OverwriteParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.OwnerParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.PermissionParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.PostOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.PutOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.ReplicationParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.ToPathParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AccessTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DoAsParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.GroupParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OwnerParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.PermissionParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.apache.hadoop.lib.service.FileSystemAccessException;
 import org.apache.hadoop.lib.service.Groups;
@@ -49,6 +45,7 @@ import org.apache.hadoop.lib.service.ProxyUser;
 import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;
 import org.apache.hadoop.lib.servlet.HostnameFilter;
 import org.apache.hadoop.lib.wsrs.InputStreamEntity;
+import org.apache.hadoop.lib.wsrs.Parameters;
 import org.apache.hadoop.security.authentication.server.AuthenticationToken;
 import org.json.simple.JSONObject;
 import org.slf4j.Logger;
@@ -57,7 +54,6 @@ import org.slf4j.MDC;
 
 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
-import javax.ws.rs.DefaultValue;
 import javax.ws.rs.GET;
 import javax.ws.rs.POST;
 import javax.ws.rs.PUT;
@@ -89,39 +85,6 @@ import java.util.Map;
 public class HttpFSServer {
   private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
 
-  /**
-   * Special binding for '/' as it is not handled by the wildcard binding.
-   *
-   * @param user principal making the request.
-   * @param op GET operation, default value is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN}.
-   * @param filter Glob filter, default value is none. Used only if the
-   * operation is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#LISTSTATUS}
-   * @param doAs user being impersonated, defualt value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
-   *
-   * @return the request response
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
-   */
-  @GET
-  @Path("/")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response root(@Context Principal user,
-                       @QueryParam(GetOpParam.NAME) GetOpParam op,
-                       @QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter,
-                       @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
-    throws IOException, FileSystemAccessException {
-    return get(user, new FsPathParam(""), op, new OffsetParam(OffsetParam.DEFAULT),
-               new LenParam(LenParam.DEFAULT), filter, doAs,
-               new OverwriteParam(OverwriteParam.DEFAULT),
-               new BlockSizeParam(BlockSizeParam.DEFAULT),
-               new PermissionParam(PermissionParam.DEFAULT),
-               new ReplicationParam(ReplicationParam.DEFAULT));
-  }
-
   /**
    * Resolves the effective user that will be used to request a FileSystemAccess filesystem.
    * <p/>
@@ -207,402 +170,405 @@ public class HttpFSServer {
     return fs;
   }
 
+  private void enforceRootPath(HttpFSFileSystem.Operation op, String path) {
+    if (!path.equals("/")) {
+      throw new UnsupportedOperationException(
+        MessageFormat.format("Operation [{0}], invalid path [{1}], must be '/'",
+                             op, path));
+    }
+  }
+
   /**
-   * Binding to handle all GET requests, supported operations are
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues}.
-   * <p/>
-   * The @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#INSTRUMENTATION} operation is available only
-   * to users that are in HttpFSServer's admin group (see {@link HttpFSServer}. It returns
-   * HttpFSServer instrumentation data. The specified path must be '/'.
+   * Special binding for '/' as it is not handled by the wildcard binding.
    *
-   * @param user principal making the request.
-   * @param path path for the GET request.
-   * @param op GET operation, default value is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN}.
-   * @param offset of the  file being fetch, used only with
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN} operations.
-   * @param len amounts of bytes, used only with @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN}
-   * operations.
-   * @param filter Glob filter, default value is none. Used only if the
-   * operation is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#LISTSTATUS}
-   * @param doAs user being impersonated, defualt value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
-   * @param override default is true. Used only for
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
-   * @param blockSize block size to set, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
-   * @param permission permission to set, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETPERMISSION}.
-   * @param replication replication factor to set, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETREPLICATION}.
+   * @param user the principal of the user making the request.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
    *
    * @return the request response.
    *
    * @throws IOException thrown if an IO error occurred. Thrown exceptions are
    * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
+   */
+  @GET
+  @Path("/")
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response getRoot(@Context Principal user,
+                          @QueryParam(OperationParam.NAME) OperationParam op,
+                          @Context Parameters params)
+    throws IOException, FileSystemAccessException {
+    return get(user, "", op, params);
+  }
+
+  private String makeAbsolute(String path) {
+    return "/" + ((path != null) ? path : "");
+  }
+
+  /**
+   * Binding to handle GET requests, supported operations are listed in
+   * {@link HttpFSFileSystem.Operation}.
+   *
+   * @param user the principal of the user making the request.
+   * @param path the path for operation.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
+   *
+   * @return the request response.
+   *
+   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
+   * handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
    */
   @GET
   @Path("{path:.*}")
   @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response get(@Context Principal user,
-                      @PathParam("path") @DefaultValue("") FsPathParam path,
-                      @QueryParam(GetOpParam.NAME) GetOpParam op,
-                      @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT) OffsetParam offset,
-                      @QueryParam(LenParam.NAME) @DefaultValue(LenParam.DEFAULT) LenParam len,
-                      @QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter,
-                      @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs,
-
-                      //these params are only for createHandle operation acceptance purposes
-                      @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override,
-                      @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize,
-                      @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
-                      PermissionParam permission,
-                      @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
-                      ReplicationParam replication
-  )
+                      @PathParam("path") String path,
+                      @QueryParam(OperationParam.NAME) OperationParam op,
+                      @Context Parameters params)
     throws IOException, FileSystemAccessException {
-    Response response = null;
-    if (op == null) {
-      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", GetOpParam.NAME));
-    } else {
-      path.makeAbsolute();
-      MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
-      switch (op.value()) {
-        case OPEN: {
-          //Invoking the command directly using an unmanaged FileSystem that is released by the
-          //FileSystemReleaseFilter
-          FSOperations.FSOpen command = new FSOperations.FSOpen(path.value());
-          FileSystem fs = createFileSystem(user, doAs.value());
-          InputStream is = command.execute(fs);
-          AUDIT_LOG.info("[{}] offset [{}] len [{}]", new Object[]{path, offset, len});
-          InputStreamEntity entity = new InputStreamEntity(is, offset.value(), len.value());
-          response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
-          break;
-        }
-        case GETFILESTATUS: {
-          FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path.value());
-          Map json = fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}]", path);
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case LISTSTATUS: {
-          FSOperations.FSListStatus command = new FSOperations.FSListStatus(path.value(), filter.value());
-          Map json = fsExecute(user, doAs.value(), command);
-          if (filter.value() == null) {
-            AUDIT_LOG.info("[{}]", path);
-          } else {
-            AUDIT_LOG.info("[{}] filter [{}]", path, filter.value());
-          }
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case GETHOMEDIRECTORY: {
-          FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
-          JSONObject json = fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("");
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case INSTRUMENTATION: {
-          if (!path.value().equals("/")) {
-            throw new UnsupportedOperationException(
-              MessageFormat.format("Invalid path for {0}={1}, must be '/'",
-                                   GetOpParam.NAME, HttpFSFileSystem.GetOpValues.INSTRUMENTATION));
-          }
-          Groups groups = HttpFSServerWebApp.get().get(Groups.class);
-          List<String> userGroups = groups.getGroups(user.getName());
-          if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
-            throw new AccessControlException("User not in HttpFSServer admin group");
-          }
-          Instrumentation instrumentation = HttpFSServerWebApp.get().get(Instrumentation.class);
-          Map snapshot = instrumentation.getSnapshot();
-          response = Response.ok(snapshot).build();
-          break;
-        }
-        case GETCONTENTSUMMARY: {
-          FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path.value());
-          Map json = fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}]", path);
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case GETFILECHECKSUM: {
-          FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path.value());
-          Map json = fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}]", path);
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case GETDELEGATIONTOKEN: {
-          response = Response.status(Response.Status.BAD_REQUEST).build();
-          break;
-        }
-        case GETFILEBLOCKLOCATIONS: {
-          response = Response.status(Response.Status.BAD_REQUEST).build();
-          break;
+    Response response;
+    path = makeAbsolute(path);
+    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
+    switch (op.value()) {
+      case OPEN: {
+        //Invoking the command directly using an unmanaged FileSystem that is
+        // released by the FileSystemReleaseFilter
+        FSOperations.FSOpen command = new FSOperations.FSOpen(path);
+        FileSystem fs = createFileSystem(user, doAs);
+        InputStream is = command.execute(fs);
+        Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
+        Long len = params.get(LenParam.NAME, LenParam.class);
+        AUDIT_LOG.info("[{}] offset [{}] len [{}]",
+                       new Object[]{path, offset, len});
+        InputStreamEntity entity = new InputStreamEntity(is, offset, len);
+        response =
+          Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
+        break;
+      }
+      case GETFILESTATUS: {
+        FSOperations.FSFileStatus command =
+          new FSOperations.FSFileStatus(path);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}]", path);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case LISTSTATUS: {
+        String filter = params.get(FilterParam.NAME, FilterParam.class);
+        FSOperations.FSListStatus command = new FSOperations.FSListStatus(
+          path, filter);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] filter [{}]", path,
+                       (filter != null) ? filter : "-");
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case GETHOMEDIRECTORY: {
+        enforceRootPath(op.value(), path);
+        FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
+        JSONObject json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("");
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case INSTRUMENTATION: {
+        enforceRootPath(op.value(), path);
+        Groups groups = HttpFSServerWebApp.get().get(Groups.class);
+        List<String> userGroups = groups.getGroups(user.getName());
+        if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
+          throw new AccessControlException(
+            "User not in HttpFSServer admin group");
         }
+        Instrumentation instrumentation =
+          HttpFSServerWebApp.get().get(Instrumentation.class);
+        Map snapshot = instrumentation.getSnapshot();
+        response = Response.ok(snapshot).build();
+        break;
+      }
+      case GETCONTENTSUMMARY: {
+        FSOperations.FSContentSummary command =
+          new FSOperations.FSContentSummary(path);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}]", path);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case GETFILECHECKSUM: {
+        FSOperations.FSFileChecksum command =
+          new FSOperations.FSFileChecksum(path);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}]", path);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case GETFILEBLOCKLOCATIONS: {
+        response = Response.status(Response.Status.BAD_REQUEST).build();
+        break;
+      }
+      default: {
+        throw new IOException(
+          MessageFormat.format("Invalid HTTP GET operation [{0}]",
+                               op.value()));
       }
-      return response;
     }
+    return response;
   }
 
-  /**
-   * Creates the URL for an upload operation (create or append).
-   *
-   * @param uriInfo uri info of the request.
-   * @param uploadOperation operation for the upload URL.
-   *
-   * @return the URI for uploading data.
-   */
-  protected URI createUploadRedirectionURL(UriInfo uriInfo, Enum<?> uploadOperation) {
-    UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
-    uriBuilder = uriBuilder.replaceQueryParam(PutOpParam.NAME, uploadOperation).
-      queryParam(DataParam.NAME, Boolean.TRUE);
-    return uriBuilder.build(null);
-  }
 
   /**
-   * Binding to handle all DELETE requests.
+   * Binding to handle DELETE requests.
    *
-   * @param user principal making the request.
-   * @param path path for the DELETE request.
-   * @param op DELETE operation, default value is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.DeleteOpValues#DELETE}.
-   * @param recursive indicates if the delete is recursive, default is <code>false</code>
-   * @param doAs user being impersonated, defualt value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
+   * @param user the principal of the user making the request.
+   * @param path the path for operation.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
    *
    * @return the request response.
    *
    * @throws IOException thrown if an IO error occurred. Thrown exceptions are
    * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
    */
   @DELETE
   @Path("{path:.*}")
   @Produces(MediaType.APPLICATION_JSON)
   public Response delete(@Context Principal user,
-                         @PathParam("path") FsPathParam path,
-                         @QueryParam(DeleteOpParam.NAME) DeleteOpParam op,
-                         @QueryParam(DeleteRecursiveParam.NAME) @DefaultValue(DeleteRecursiveParam.DEFAULT)
-                         DeleteRecursiveParam recursive,
-                         @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
+                         @PathParam("path") String path,
+                         @QueryParam(OperationParam.NAME) OperationParam op,
+                         @Context Parameters params)
     throws IOException, FileSystemAccessException {
-    Response response = null;
-    if (op == null) {
-      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", DeleteOpParam.NAME));
-    }
+    Response response;
+    path = makeAbsolute(path);
+    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
     switch (op.value()) {
       case DELETE: {
-        path.makeAbsolute();
-        MDC.put(HttpFSFileSystem.OP_PARAM, "DELETE");
+        Boolean recursive =
+          params.get(RecursiveParam.NAME, RecursiveParam.class);
         AUDIT_LOG.info("[{}] recursive [{}]", path, recursive);
-        FSOperations.FSDelete command = new FSOperations.FSDelete(path.value(), recursive.value());
-        JSONObject json = fsExecute(user, doAs.value(), command);
+        FSOperations.FSDelete command =
+          new FSOperations.FSDelete(path, recursive);
+        JSONObject json = fsExecute(user, doAs, command);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
       }
+      default: {
+        throw new IOException(
+          MessageFormat.format("Invalid HTTP DELETE operation [{0}]",
+                               op.value()));
+      }
     }
     return response;
   }
 
+  /**
+   * Binding to handle POST requests.
+   *
+   * @param is the inputstream for the request payload.
+   * @param user the principal of the user making the request.
+   * @param uriInfo the UriInfo of the request.
+   * @param path the path for operation.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
+   *
+   * @return the request response.
+   *
+   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
+   * handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
+   */
+  @POST
+  @Path("{path:.*}")
+  @Consumes({"*/*"})
+  @Produces({MediaType.APPLICATION_JSON})
+  public Response post(InputStream is,
+                       @Context Principal user,
+                       @Context UriInfo uriInfo,
+                       @PathParam("path") String path,
+                       @QueryParam(OperationParam.NAME) OperationParam op,
+                       @Context Parameters params)
+    throws IOException, FileSystemAccessException {
+    Response response;
+    path = makeAbsolute(path);
+    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
+    switch (op.value()) {
+      case APPEND: {
+        boolean hasData = params.get(DataParam.NAME, DataParam.class);
+        if (!hasData) {
+          response = Response.temporaryRedirect(
+            createUploadRedirectionURL(uriInfo,
+              HttpFSFileSystem.Operation.APPEND)).build();
+        } else {
+          FSOperations.FSAppend command =
+            new FSOperations.FSAppend(is, path);
+          fsExecute(user, doAs, command);
+          AUDIT_LOG.info("[{}]", path);
+          response = Response.ok().type(MediaType.APPLICATION_JSON).build();
+        }
+        break;
+      }
+      default: {
+        throw new IOException(
+          MessageFormat.format("Invalid HTTP POST operation [{0}]",
+                               op.value()));
+      }
+    }
+    return response;
+  }
 
   /**
-   * Binding to handle all PUT requests, supported operations are
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues}.
+   * Creates the URL for an upload operation (create or append).
    *
-   * @param is request input stream, used only for
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues#APPEND} operations.
-   * @param user principal making the request.
-   * @param uriInfo the request uriInfo.
-   * @param path path for the PUT request.
-   * @param op PUT operation, no default value.
-   * @param toPath new path, used only for
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#RENAME} operations.
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETTIMES}.
-   * @param owner owner to set, used only for
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETOWNER} operations.
-   * @param group group to set, used only for
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETOWNER} operations.
-   * @param override default is true. Used only for
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
-   * @param blockSize block size to set, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
-   * @param permission permission to set, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETPERMISSION}.
-   * @param replication replication factor to set, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETREPLICATION}.
-   * @param modifiedTime modified time, in seconds since EPOC, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETTIMES}.
-   * @param accessTime accessed time, in seconds since EPOC, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETTIMES}.
-   * @param hasData indicates if the append request is uploading data or not
-   * (just getting the handle).
-   * @param doAs user being impersonated, defualt value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
+   * @param uriInfo uri info of the request.
+   * @param uploadOperation operation for the upload URL.
+   *
+   * @return the URI for uploading data.
+   */
+  protected URI createUploadRedirectionURL(UriInfo uriInfo, Enum<?> uploadOperation) {
+    UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
+    uriBuilder = uriBuilder.replaceQueryParam(OperationParam.NAME, uploadOperation).
+      queryParam(DataParam.NAME, Boolean.TRUE);
+    return uriBuilder.build(null);
+  }
+
+
+  /**
+   * Binding to handle PUT requests.
+   *
+   * @param is the inputstream for the request payload.
+   * @param user the principal of the user making the request.
+   * @param uriInfo the UriInfo of the request.
+   * @param path the path for operation.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
    *
    * @return the request response.
    *
    * @throws IOException thrown if an IO error occurred. Thrown exceptions are
    * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
    */
   @PUT
   @Path("{path:.*}")
   @Consumes({"*/*"})
   @Produces({MediaType.APPLICATION_JSON})
   public Response put(InputStream is,
-                      @Context Principal user,
-                      @Context UriInfo uriInfo,
-                      @PathParam("path") FsPathParam path,
-                      @QueryParam(PutOpParam.NAME) PutOpParam op,
-                      @QueryParam(ToPathParam.NAME) @DefaultValue(ToPathParam.DEFAULT) ToPathParam toPath,
-                      @QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT) OwnerParam owner,
-                      @QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT) GroupParam group,
-                      @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override,
-                      @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize,
-                      @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
-                      PermissionParam permission,
-                      @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
-                      ReplicationParam replication,
-                      @QueryParam(ModifiedTimeParam.NAME) @DefaultValue(ModifiedTimeParam.DEFAULT)
-                      ModifiedTimeParam modifiedTime,
-                      @QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT)
-                      AccessTimeParam accessTime,
-                      @QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData,
-                      @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
+                       @Context Principal user,
+                       @Context UriInfo uriInfo,
+                       @PathParam("path") String path,
+                       @QueryParam(OperationParam.NAME) OperationParam op,
+                       @Context Parameters params)
     throws IOException, FileSystemAccessException {
-    Response response = null;
-    if (op == null) {
-      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PutOpParam.NAME));
-    }
-    path.makeAbsolute();
+    Response response;
+    path = makeAbsolute(path);
     MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
     switch (op.value()) {
       case CREATE: {
-        if (!hasData.value()) {
+        boolean hasData = params.get(DataParam.NAME, DataParam.class);
+        if (!hasData) {
           response = Response.temporaryRedirect(
-            createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PutOpValues.CREATE)).build();
+            createUploadRedirectionURL(uriInfo,
+              HttpFSFileSystem.Operation.CREATE)).build();
         } else {
-          FSOperations.FSCreate
-            command = new FSOperations.FSCreate(is, path.value(), permission.value(), override.value(),
-                                                replication.value(), blockSize.value());
-          fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}] permission [{}] override [{}] replication [{}] blockSize [{}]",
-                         new Object[]{path, permission, override, replication, blockSize});
+          String permission = params.get(PermissionParam.NAME,
+                                         PermissionParam.class);
+          boolean override = params.get(OverwriteParam.NAME,
+                                        OverwriteParam.class);
+          short replication = params.get(ReplicationParam.NAME,
+                                         ReplicationParam.class);
+          long blockSize = params.get(BlockSizeParam.NAME,
+                                      BlockSizeParam.class);
+          FSOperations.FSCreate command =
+            new FSOperations.FSCreate(is, path, permission, override,
+                                      replication, blockSize);
+          fsExecute(user, doAs, command);
+          AUDIT_LOG.info(
+            "[{}] permission [{}] override [{}] replication [{}] blockSize [{}]",
+            new Object[]{path, permission, override, replication, blockSize});
           response = Response.status(Response.Status.CREATED).build();
         }
         break;
       }
       case MKDIRS: {
-        FSOperations.FSMkdirs command = new FSOperations.FSMkdirs(path.value(), permission.value());
-        JSONObject json = fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] permission [{}]", path, permission.value());
+        String permission = params.get(PermissionParam.NAME,
+                                       PermissionParam.class);
+        FSOperations.FSMkdirs command =
+          new FSOperations.FSMkdirs(path, permission);
+        JSONObject json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] permission [{}]", path, permission);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
       }
       case RENAME: {
-        FSOperations.FSRename command = new FSOperations.FSRename(path.value(), toPath.value());
-        JSONObject json = fsExecute(user, doAs.value(), command);
+        String toPath = params.get(DestinationParam.NAME, DestinationParam.class);
+        FSOperations.FSRename command =
+          new FSOperations.FSRename(path, toPath);
+        JSONObject json = fsExecute(user, doAs, command);
         AUDIT_LOG.info("[{}] to [{}]", path, toPath);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
       }
       case SETOWNER: {
-        FSOperations.FSSetOwner command = new FSOperations.FSSetOwner(path.value(), owner.value(), group.value());
-        fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner.value() + ":" + group.value());
+        String owner = params.get(OwnerParam.NAME, OwnerParam.class);
+        String group = params.get(GroupParam.NAME, GroupParam.class);
+        FSOperations.FSSetOwner command =
+          new FSOperations.FSSetOwner(path, owner, group);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner + ":" + group);
         response = Response.ok().build();
         break;
       }
       case SETPERMISSION: {
-        FSOperations.FSSetPermission command = new FSOperations.FSSetPermission(path.value(), permission.value());
-        fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] to [{}]", path, permission.value());
+        String permission = params.get(PermissionParam.NAME,
+                                       PermissionParam.class);
+        FSOperations.FSSetPermission command =
+          new FSOperations.FSSetPermission(path, permission);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to [{}]", path, permission);
         response = Response.ok().build();
         break;
       }
       case SETREPLICATION: {
-        FSOperations.FSSetReplication command = new FSOperations.FSSetReplication(path.value(), replication.value());
-        JSONObject json = fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] to [{}]", path, replication.value());
+        short replication = params.get(ReplicationParam.NAME,
+                                       ReplicationParam.class);
+        FSOperations.FSSetReplication command =
+          new FSOperations.FSSetReplication(path, replication);
+        JSONObject json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to [{}]", path, replication);
         response = Response.ok(json).build();
         break;
       }
       case SETTIMES: {
-        FSOperations.FSSetTimes
-          command = new FSOperations.FSSetTimes(path.value(), modifiedTime.value(), accessTime.value());
-        fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] to (M/A)[{}]", path, modifiedTime.value() + ":" + accessTime.value());
+        long modifiedTime = params.get(ModifiedTimeParam.NAME,
+                                       ModifiedTimeParam.class);
+        long accessTime = params.get(AccessTimeParam.NAME,
+                                     AccessTimeParam.class);
+        FSOperations.FSSetTimes command =
+          new FSOperations.FSSetTimes(path, modifiedTime, accessTime);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to (M/A)[{}]", path,
+                       modifiedTime + ":" + accessTime);
         response = Response.ok().build();
         break;
       }
-      case RENEWDELEGATIONTOKEN: {
-        response = Response.status(Response.Status.BAD_REQUEST).build();
-        break;
-      }
-      case CANCELDELEGATIONTOKEN: {
-        response = Response.status(Response.Status.BAD_REQUEST).build();
-        break;
-      }
-    }
-    return response;
-  }
-
-  /**
-   * Binding to handle all OPST requests, supported operations are
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues}.
-   *
-   * @param is request input stream, used only for
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues#APPEND} operations.
-   * @param user principal making the request.
-   * @param uriInfo the request uriInfo.
-   * @param path path for the POST request.
-   * @param op POST operation, default is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues#APPEND}.
-   * @param hasData indicates if the append request is uploading data or not (just getting the handle).
-   * @param doAs user being impersonated, defualt value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
-   *
-   * @return the request response.
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
-   */
-  @POST
-  @Path("{path:.*}")
-  @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
-  public Response post(InputStream is,
-                       @Context Principal user,
-                       @Context UriInfo uriInfo,
-                       @PathParam("path") FsPathParam path,
-                       @QueryParam(PostOpParam.NAME) PostOpParam op,
-                       @QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData,
-                       @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
-    throws IOException, FileSystemAccessException {
-    Response response = null;
-    if (op == null) {
-      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PostOpParam.NAME));
-    }
-    path.makeAbsolute();
-    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
-    switch (op.value()) {
-      case APPEND: {
-        if (!hasData.value()) {
-          response = Response.temporaryRedirect(
-            createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PostOpValues.APPEND)).build();
-        } else {
-          FSOperations.FSAppend command = new FSOperations.FSAppend(is, path.value());
-          fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}]", path);
-          response = Response.ok().type(MediaType.APPLICATION_JSON).build();
-        }
-        break;
+      default: {
+        throw new IOException(
+          MessageFormat.format("Invalid HTTP PUT operation [{0}]",
+                               op.value()));
       }
     }
     return response;
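
The CREATE and APPEND branches above implement a two-step handshake: a request
without data=true is answered with a temporary redirect built by
createUploadRedirectionURL(), and only the follow-up request to the redirect
URL carries the payload. A minimal client sketch, assuming an HttpFS server at
localhost:14000 under a /webhdfs/v1 context with pseudo authentication via the
user.name query parameter (server address, context path and auth mode are
assumptions, not shown in this diff):

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class HttpFSCreateSketch {
  public static void main(String[] args) throws Exception {
    // Step 1: PUT without data=true; expect a 307 carrying the upload URL.
    URL url = new URL("http://localhost:14000/webhdfs/v1/tmp/demo.txt"
                      + "?op=CREATE&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setInstanceFollowRedirects(false);   // capture the 307 ourselves
    String uploadUrl = conn.getHeaderField("Location");

    // Step 2: PUT the payload to the redirect URL (it carries data=true).
    HttpURLConnection upload =
      (HttpURLConnection) new URL(uploadUrl).openConnection();
    upload.setRequestMethod("PUT");
    upload.setDoOutput(true);
    // CheckUploadContentTypeFilter rejects anything but octet-stream.
    upload.setRequestProperty("Content-Type", "application/octet-stream");
    OutputStream os = upload.getOutputStream();
    os.write("hello".getBytes("UTF-8"));
    os.close();
    System.out.println(upload.getResponseCode());   // 201 Created on success
  }
}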

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java

@@ -22,15 +22,14 @@ import java.text.MessageFormat;
 
 public abstract class BooleanParam extends Param<Boolean> {
 
-  public BooleanParam(String name, String str) {
-    value = parseParam(name, str);
+  public BooleanParam(String name, Boolean defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Boolean parse(String str) throws Exception {
     if (str.equalsIgnoreCase("true")) {
       return true;
-    }
-    if (str.equalsIgnoreCase("false")) {
+    } else if (str.equalsIgnoreCase("false")) {
       return false;
     }
     throw new IllegalArgumentException(MessageFormat.format("Invalid value [{0}], must be a boolean", str));
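
With the constructor now taking a default value instead of the raw string,
concrete parameter classes reduce to a name plus a default, and parsing moves
to ParametersProvider. A sketch of the pattern (illustrative only; the real
DataParam lives in HttpFSParametersProvider, and the default shown here is an
assumption):

public static class DataParam extends BooleanParam {
  public static final String NAME = "data";

  public DataParam() {
    super(NAME, false);   // value used when ?data= is absent or empty
  }
}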

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.lib.wsrs;
 
 public abstract class ByteParam extends Param<Byte> {
 
-  public ByteParam(String name, String str) {
-    value = parseParam(name, str);
+  public ByteParam(String name, Byte defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Byte parse(String str) throws Exception {

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java

@@ -25,9 +25,9 @@ import java.util.Arrays;
 public abstract class EnumParam<E extends Enum<E>> extends Param<E> {
   Class<E> klass;
 
-  public EnumParam(String label, String str, Class<E> e) {
+  public EnumParam(String name, Class<E> e, E defaultValue) {
+    super(name, defaultValue);
     klass = e;
-    value = parseParam(label, str);
   }
 
   protected E parse(String str) throws Exception {

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.lib.wsrs;
 
 public abstract class IntegerParam extends Param<Integer> {
 
-  public IntegerParam(String name, String str) {
-    value = parseParam(name, str);
+  public IntegerParam(String name, Integer defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Integer parse(String str) throws Exception {

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.lib.wsrs;
 
 public abstract class LongParam extends Param<Long> {
 
-  public LongParam(String name, String str) {
-    value = parseParam(name, str);
+  public LongParam(String name, Long defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Long parse(String str) throws Exception {

+ 15 - 8
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java

@@ -23,32 +23,39 @@ import org.apache.hadoop.lib.util.Check;
 import java.text.MessageFormat;
 
 public abstract class Param<T> {
+  private String name;
   protected T value;
 
-  public T parseParam(String name, String str) {
-    Check.notNull(name, "name");
+  public Param(String name, T defaultValue) {
+    this.name = name;
+    this.value = defaultValue;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public T parseParam(String str) {
     try {
-      return (str != null && str.trim().length() > 0) ? parse(str) : null;
+      value = (str != null && str.trim().length() > 0) ? parse(str) : value;
     } catch (Exception ex) {
       throw new IllegalArgumentException(
         MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]",
                              name, str, getDomain()));
     }
+    return value;
   }
 
   public T value() {
     return value;
   }
 
-  protected void setValue(T value) {
-    this.value = value;
-  }
-
   protected abstract String getDomain();
 
   protected abstract T parse(String str) throws Exception;
 
   public String toString() {
-    return value.toString();
+    return (value != null) ? value.toString() : "NULL";
   }
+
 }

+ 27 - 28
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestEnumParam.java → hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java

@@ -15,38 +15,37 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.lib.wsrs;
 
+import java.util.Map;
 
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestEnumParam {
-
-  public static enum ENUM {
-    FOO, BAR
-  }
-
-  @Test
-  public void param() throws Exception {
-    EnumParam<ENUM> param = new EnumParam<ENUM>("p", "FOO", ENUM.class) {
-    };
-    Assert.assertEquals(param.getDomain(), "FOO,BAR");
-    Assert.assertEquals(param.value(), ENUM.FOO);
-    Assert.assertEquals(param.toString(), "FOO");
-    param = new EnumParam<ENUM>("p", null, ENUM.class) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new EnumParam<ENUM>("p", "", ENUM.class) {
-    };
-    Assert.assertEquals(param.value(), null);
+/**
+ * Class that contains all parsed JAX-RS parameters.
+ * <p/>
+ * Instances are created by the {@link ParametersProvider} class.
+ */
+public class Parameters {
+  private Map<String, Param<?>> params;
+
+  /**
+   * Constructor that receives the request parsed parameters.
+   *
+   * @param params the request parsed parameters.
+   */
+  public Parameters(Map<String, Param<?>> params) {
+    this.params = params;
   }
 
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid1() throws Exception {
-    new EnumParam<ENUM>("p", "x", ENUM.class) {
-    };
+  /**
+   * Returns the value of a request parsed parameter.
+   *
+   * @param name parameter name.
+   * @param klass class of the parameter, used for value casting.
+   * @return the value of the parameter.
+   */
+  @SuppressWarnings("unchecked")
+  public <V, T extends Param<V>> V get(String name, Class<T> klass) {
+    return ((T)params.get(name)).value();
   }
-
+
 }
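
Handlers read typed values out of the injected map, as in the HttpFSServer
changes above, e.g.:

Boolean recursive = params.get(RecursiveParam.NAME, RecursiveParam.class);
Short replication = params.get(ReplicationParam.NAME, ReplicationParam.class);

Note that get() dereferences the looked-up Param unconditionally, so asking
for a parameter that is not registered for the current operation in the
provider's paramsDef map fails with a NullPointerException; every parameter a
handler reads must be declared there.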

+ 107 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java

@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.lib.wsrs;
+
+import com.sun.jersey.api.core.HttpContext;
+import com.sun.jersey.core.spi.component.ComponentContext;
+import com.sun.jersey.core.spi.component.ComponentScope;
+import com.sun.jersey.server.impl.inject.AbstractHttpContextInjectable;
+import com.sun.jersey.spi.inject.Injectable;
+import com.sun.jersey.spi.inject.InjectableProvider;
+
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MultivaluedMap;
+import java.lang.reflect.Type;
+import java.text.MessageFormat;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Jersey provider that parses the request parameters based on the
+ * given parameter definition. 
+ */
+public class ParametersProvider
+  extends AbstractHttpContextInjectable<Parameters>
+  implements InjectableProvider<Context, Type> {
+
+  private String driverParam;
+  private Class<? extends Enum> enumClass;
+  private Map<Enum, Class<Param<?>>[]> paramsDef;
+
+  public ParametersProvider(String driverParam, Class<? extends Enum> enumClass,
+                            Map<Enum, Class<Param<?>>[]> paramsDef) {
+    this.driverParam = driverParam;
+    this.enumClass = enumClass;
+    this.paramsDef = paramsDef;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public Parameters getValue(HttpContext httpContext) {
+    Map<String, Param<?>> map = new HashMap<String, Param<?>>();
+    MultivaluedMap<String, String> queryString =
+      httpContext.getRequest().getQueryParameters();
+    String str = queryString.getFirst(driverParam);
+    if (str == null) {
+      throw new IllegalArgumentException(
+        MessageFormat.format("Missing Operation parameter [{0}]",
+                             driverParam));
+    }
+    Enum op;
+    try {
+      op = Enum.valueOf(enumClass, str.toUpperCase());
+    } catch (IllegalArgumentException ex) {
+      throw new IllegalArgumentException(
+        MessageFormat.format("Invalid Operation [{0}]", str));
+    }
+    if (!paramsDef.containsKey(op)) {
+      throw new IllegalArgumentException(
+        MessageFormat.format("Unsupported Operation [{0}]", op));
+    }
+    for (Class<Param<?>> paramClass : paramsDef.get(op)) {
+      Param<?> param;
+      try {
+        param = paramClass.newInstance();
+      } catch (Exception ex) {
+        throw new UnsupportedOperationException(
+          MessageFormat.format(
+            "Param class [{0}] does not have default constructor",
+            paramClass.getName()));
+      }
+      try {
+        param.parseParam(queryString.getFirst(param.getName()));
+      } catch (Exception ex) {
+        throw new IllegalArgumentException(ex.toString(), ex);
+      }
+      map.put(param.getName(), param);
+    }
+    return new Parameters(map);
+  }
+
+  @Override
+  public ComponentScope getScope() {
+    return ComponentScope.PerRequest;
+  }
+
+  @Override
+  public Injectable getInjectable(ComponentContext componentContext, Context context, Type type) {
+    return (type.equals(Parameters.class)) ? this : null;
+  }
+}
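
Applications wire the provider up by subclassing it with a map from each
operation to the Param classes that operation accepts; since the provider
instantiates parameters via newInstance(), every Param class must expose a
no-argument constructor. A hedged wiring sketch (the map contents are
illustrative, not the full definition used by HttpFSParametersProvider):

import java.util.HashMap;
import java.util.Map;

@SuppressWarnings("unchecked")
public class DemoParametersProvider extends ParametersProvider {
  private static final Map<Enum, Class<Param<?>>[]> PARAMS_DEF =
    new HashMap<Enum, Class<Param<?>>[]>();

  static {
    PARAMS_DEF.put(HttpFSFileSystem.Operation.OPEN,
      new Class[]{DoAsParam.class, OffsetParam.class, LenParam.class});
    PARAMS_DEF.put(HttpFSFileSystem.Operation.DELETE,
      new Class[]{DoAsParam.class, RecursiveParam.class});
  }

  public DemoParametersProvider() {
    super(OperationParam.NAME, HttpFSFileSystem.Operation.class, PARAMS_DEF);
  }
}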

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.lib.wsrs;
 
 public abstract class ShortParam extends Param<Short> {
 
-  public ShortParam(String name, String str) {
-    value = parseParam(name, str);
+  public ShortParam(String name, Short defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Short parse(String str) throws Exception {

+ 9 - 13
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java

@@ -15,42 +15,38 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.lib.wsrs;
 
-import org.apache.hadoop.lib.util.Check;
-
 import java.text.MessageFormat;
 import java.util.regex.Pattern;
 
 public abstract class StringParam extends Param<String> {
   private Pattern pattern;
 
-  public StringParam(String name, String str) {
-    this(name, str, null);
+  public StringParam(String name, String defaultValue) {
+    this(name, defaultValue, null);
   }
 
-  public StringParam(String name, String str, Pattern pattern) {
+  public StringParam(String name, String defaultValue, Pattern pattern) {
+    super(name, defaultValue);
     this.pattern = pattern;
-    value = parseParam(name, str);
+    parseParam(defaultValue);
   }
 
-  public String parseParam(String name, String str) {
-    String ret = null;
-    Check.notNull(name, "name");
+  public String parseParam(String str) {
     try {
       if (str != null) {
         str = str.trim();
         if (str.length() > 0) {
-          return parse(str);
+          value = parse(str);
         }
       }
     } catch (Exception ex) {
       throw new IllegalArgumentException(
         MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]",
-                             name, str, getDomain()));
+                             getName(), str, getDomain()));
     }
-    return ret;
+    return value;
   }
 
   protected String parse(String str) throws Exception {
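
The pattern-taking constructor gives regex validation for free. A hypothetical
subclass (name and pattern invented for illustration):

public static class UserNameParam extends StringParam {
  public static final String NAME = "username";

  public UserNameParam() {
    // hypothetical: restrict values to POSIX-style user names
    super(NAME, null, Pattern.compile("[a-z_][a-z0-9_-]*"));
  }
}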

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java

@@ -475,6 +475,7 @@ public class TestHttpFSFileSystem extends HFSTestCase {
       ops[i] = new Object[]{Operation.values()[i]};
     }
     return Arrays.asList(ops);
+//    return Arrays.asList(new Object[][]{ new Object[]{Operation.CREATE}});
   }
 
   private Operation operation;

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java

@@ -31,34 +31,34 @@ public class TestCheckUploadContentTypeFilter {
 
   @Test
   public void putUpload() throws Exception {
-    test("PUT", HttpFSFileSystem.PutOpValues.CREATE.toString(), "application/octet-stream", true, false);
+    test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "application/octet-stream", true, false);
   }
 
   @Test
   public void postUpload() throws Exception {
-    test("POST", HttpFSFileSystem.PostOpValues.APPEND.toString(), "APPLICATION/OCTET-STREAM", true, false);
+    test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "APPLICATION/OCTET-STREAM", true, false);
   }
 
   @Test
   public void putUploadWrong() throws Exception {
-    test("PUT", HttpFSFileSystem.PutOpValues.CREATE.toString(), "plain/text", false, false);
-    test("PUT", HttpFSFileSystem.PutOpValues.CREATE.toString(), "plain/text", true, true);
+    test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "plain/text", false, false);
+    test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "plain/text", true, true);
   }
 
   @Test
   public void postUploadWrong() throws Exception {
-    test("POST", HttpFSFileSystem.PostOpValues.APPEND.toString(), "plain/text", false, false);
-    test("POST", HttpFSFileSystem.PostOpValues.APPEND.toString(), "plain/text", true, true);
+    test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "plain/text", false, false);
+    test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "plain/text", true, true);
   }
 
   @Test
   public void getOther() throws Exception {
-    test("GET", HttpFSFileSystem.GetOpValues.GETHOMEDIRECTORY.toString(), "plain/text", false, false);
+    test("GET", HttpFSFileSystem.Operation.GETHOMEDIRECTORY.toString(), "plain/text", false, false);
   }
 
   @Test
   public void putOther() throws Exception {
-    test("PUT", HttpFSFileSystem.PutOpValues.MKDIRS.toString(), "plain/text", false, false);
+    test("PUT", HttpFSFileSystem.Operation.MKDIRS.toString(), "plain/text", false, false);
   }
 
   private void test(String method, String operation, String contentType,
@@ -68,7 +68,7 @@ public class TestCheckUploadContentTypeFilter {
     Mockito.reset(request);
     Mockito.when(request.getMethod()).thenReturn(method);
     Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).thenReturn(operation);
-    Mockito.when(request.getParameter(HttpFSParams.DataParam.NAME)).
+    Mockito.when(request.getParameter(HttpFSParametersProvider.DataParam.NAME)).
       thenReturn(Boolean.toString(upload));
     Mockito.when(request.getContentType()).thenReturn(contentType);
 

+ 0 - 50
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestBooleanParam.java

@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestBooleanParam {
-
-  @Test
-  public void param() throws Exception {
-    BooleanParam param = new BooleanParam("p", "true") {
-    };
-    Assert.assertEquals(param.getDomain(), "a boolean");
-    Assert.assertEquals(param.value(), Boolean.TRUE);
-    Assert.assertEquals(param.toString(), "true");
-    param = new BooleanParam("p", "false") {
-    };
-    Assert.assertEquals(param.value(), Boolean.FALSE);
-    param = new BooleanParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new BooleanParam("p", "") {
-    };
-    Assert.assertEquals(param.value(), null);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid() throws Exception {
-    new BooleanParam("p", "x") {
-    };
-  }
-
-}

+ 0 - 53
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestByteParam.java

@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestByteParam {
-
-  @Test
-  public void param() throws Exception {
-    ByteParam param = new ByteParam("p", "1") {
-    };
-    Assert.assertEquals(param.getDomain(), "a byte");
-    Assert.assertEquals(param.value(), new Byte((byte) 1));
-    Assert.assertEquals(param.toString(), "1");
-    param = new ByteParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new ByteParam("p", "") {
-    };
-    Assert.assertEquals(param.value(), null);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid1() throws Exception {
-    new ByteParam("p", "x") {
-    };
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid2() throws Exception {
-    new ByteParam("p", "256") {
-    };
-  }
-}

+ 0 - 52
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestIntegerParam.java

@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestIntegerParam {
-
-  @Test
-  public void param() throws Exception {
-    IntegerParam param = new IntegerParam("p", "1") {
-    };
-    Assert.assertEquals(param.getDomain(), "an integer");
-    Assert.assertEquals(param.value(), new Integer(1));
-    Assert.assertEquals(param.toString(), "1");
-    param = new IntegerParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new IntegerParam("p", "") {
-    };
-    Assert.assertEquals(param.value(), null);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid1() throws Exception {
-    new IntegerParam("p", "x") {
-    };
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid2() throws Exception {
-    new IntegerParam("p", "" + Long.MAX_VALUE) {
-    };
-  }
-}

+ 0 - 47
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestLongParam.java

@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestLongParam {
-
-  @Test
-  public void param() throws Exception {
-    LongParam param = new LongParam("p", "1") {
-    };
-    Assert.assertEquals(param.getDomain(), "a long");
-    Assert.assertEquals(param.value(), new Long(1));
-    Assert.assertEquals(param.toString(), "1");
-    param = new LongParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new LongParam("p", "") {
-    };
-    Assert.assertEquals(param.value(), null);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid1() throws Exception {
-    new LongParam("p", "x") {
-    };
-  }
-
-}

+ 120 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java

@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.lib.wsrs;
+
+import junit.framework.Assert;
+import org.junit.Test;
+
+import java.util.regex.Pattern;
+
+public class TestParam {
+
+  private <T> void test(Param<T> param, String name,
+                   String domain, T defaultValue, T validValue,
+                   String invalidStrValue, String outOfRangeValue) throws Exception {
+
+    Assert.assertEquals(name, param.getName());
+    Assert.assertEquals(domain, param.getDomain());
+    Assert.assertEquals(defaultValue, param.value());
+    Assert.assertEquals(defaultValue, param.parseParam(""));
+    Assert.assertEquals(defaultValue, param.parseParam(null));
+    Assert.assertEquals(validValue, param.parseParam(validValue.toString()));
+    if (invalidStrValue != null) {
+      try {
+        param.parseParam(invalidStrValue);
+        Assert.fail();
+      } catch (IllegalArgumentException ex) {
+        //NOP
+      } catch (Exception ex) {
+        Assert.fail();
+      }
+    }
+    if (outOfRangeValue != null) {
+      try {
+        param.parseParam(outOfRangeValue);
+        Assert.fail();
+      } catch (IllegalArgumentException ex) {
+        //NOP
+      } catch (Exception ex) {
+        Assert.fail();
+      }
+    }
+  }
+
+  @Test
+  public void testBoolean() throws Exception {
+    Param<Boolean> param = new BooleanParam("b", false) {
+    };
+    test(param, "b", "a boolean", false, true, "x", null);
+  }
+
+  @Test
+  public void testByte() throws Exception {
+    Param<Byte> param = new ByteParam("B", (byte) 1) {
+    };
+    test(param, "B", "a byte", (byte) 1, (byte) 2, "x", "256");
+  }
+
+  @Test
+  public void testShort() throws Exception {
+    Param<Short> param = new ShortParam("S", (short) 1) {
+    };
+    test(param, "S", "a short", (short) 1, (short) 2, "x",
+         "" + ((int)Short.MAX_VALUE + 1));
+  }
+
+  @Test
+  public void testInteger() throws Exception {
+    Param<Integer> param = new IntegerParam("I", 1) {
+    };
+    test(param, "I", "an integer", 1, 2, "x", "" + ((long)Integer.MAX_VALUE + 1));
+  }
+
+  @Test
+  public void testLong() throws Exception {
+    Param<Long> param = new LongParam("L", 1L) {
+    };
+    test(param, "L", "a long", 1L, 2L, "x", null);
+  }
+
+  public static enum ENUM {
+    FOO, BAR
+  }
+
+  @Test
+  public void testEnum() throws Exception {
+    EnumParam<ENUM> param = new EnumParam<ENUM>("e", ENUM.class, ENUM.FOO) {
+    };
+    test(param, "e", "FOO,BAR", ENUM.FOO, ENUM.BAR, "x", null);
+  }
+
+  @Test
+  public void testString() throws Exception {
+    Param<String> param = new StringParam("s", "foo") {
+    };
+    test(param, "s", "a string", "foo", "bar", null, null);
+  }
+
+  @Test
+  public void testRegEx() throws Exception {
+    Param<String> param = new StringParam("r", "aa", Pattern.compile("..")) {
+    };
+    test(param, "r", "..", "aa", "bb", "c", null);
+  }
+}
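
The consolidated TestParam above pins down a single parseParam(String) contract for every Param subclass: null or the empty string falls back to the constructor default, a well-formed string parses to the typed value, and malformed or out-of-range input raises IllegalArgumentException. A minimal usage sketch, assuming nothing beyond the behaviour the test helper asserts:

    Param<Integer> size = new IntegerParam("size", 1) {
    };
    size.value();                                           // 1, the constructor default
    size.parseParam(null);                                  // 1, null falls back to the default
    size.parseParam("");                                    // 1, so does the empty string
    size.parseParam("2");                                   // 2
    size.parseParam("x");                                   // throws IllegalArgumentException
    size.parseParam("" + ((long) Integer.MAX_VALUE + 1));   // throws IllegalArgumentException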

+ 0 - 53
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestShortParam.java

@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestShortParam {
-
-  @Test
-  public void param() throws Exception {
-    ShortParam param = new ShortParam("p", "1") {
-    };
-    Assert.assertEquals(param.getDomain(), "a short");
-    Assert.assertEquals(param.value(), new Short((short) 1));
-    Assert.assertEquals(param.toString(), "1");
-    param = new ShortParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new ShortParam("p", "") {
-    };
-    Assert.assertEquals(param.value(), null);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid1() throws Exception {
-    new ShortParam("p", "x") {
-    };
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid2() throws Exception {
-    new ShortParam("p", "" + Integer.MAX_VALUE) {
-    };
-  }
-}

+ 0 - 64
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestStringParam.java

@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-import java.util.regex.Pattern;
-
-public class TestStringParam {
-
-  @Test
-  public void param() throws Exception {
-    StringParam param = new StringParam("p", "s") {
-    };
-    Assert.assertEquals(param.getDomain(), "a string");
-    Assert.assertEquals(param.value(), "s");
-    Assert.assertEquals(param.toString(), "s");
-    param = new StringParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new StringParam("p", "") {
-    };
-    Assert.assertEquals(param.value(), null);
-
-    param.setValue("S");
-    Assert.assertEquals(param.value(), "S");
-  }
-
-  @Test
-  public void paramRegEx() throws Exception {
-    StringParam param = new StringParam("p", "Aaa", Pattern.compile("A.*")) {
-    };
-    Assert.assertEquals(param.getDomain(), "A.*");
-    Assert.assertEquals(param.value(), "Aaa");
-    Assert.assertEquals(param.toString(), "Aaa");
-    param = new StringParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void paramInvalidRegEx() throws Exception {
-    new StringParam("p", "Baa", Pattern.compile("A.*")) {
-    };
-  }
-}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/httpfs-log4j.properties

@@ -0,0 +1,22 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#log4j.appender.test=org.apache.log4j.varia.NullAppender
+#log4j.appender.test=org.apache.log4j.ConsoleAppender
+log4j.appender.test=org.apache.log4j.FileAppender
+log4j.appender.test.File=${test.dir}/test.log
+log4j.appender.test.Append=true
+log4j.appender.test.layout=org.apache.log4j.PatternLayout
+log4j.appender.test.layout.ConversionPattern=%d{ISO8601} %5p %20c{1}: %4L - %m%n
+log4j.rootLogger=ALL, test
+
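
The test logging configuration above routes everything to a single FileAppender whose path is built from the ${test.dir} placeholder, which log4j resolves from a Java system property of the same name. A hedged sketch of how a test bootstrap might load it (the class name and directory are illustrative assumptions, not part of this commit):

    import org.apache.log4j.Logger;
    import org.apache.log4j.PropertyConfigurator;

    public class HttpFSTestLogging {
      public static void main(String[] args) {
        // ${test.dir} must be set before the configuration is loaded,
        // otherwise the FileAppender path cannot be resolved.
        System.setProperty("test.dir", "target/test-dir");
        PropertyConfigurator.configure(
            HttpFSTestLogging.class.getResource("/httpfs-log4j.properties"));
        Logger.getLogger(HttpFSTestLogging.class).info("written to target/test-dir/test.log");
      }
    }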

+ 170 - 0
hadoop-hdfs-project/hadoop-hdfs-raid/pom.xml

@@ -0,0 +1,170 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+-->
+<project>
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project-dist</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+    <relativePath>../../hadoop-project-dist</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-hdfs-raid</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <packaging>jar</packaging>
+
+  <name>Apache Hadoop HDFS Raid</name>
+  <description>Apache Hadoop HDFS Raid</description>
+
+
+  <properties>
+    <hadoop.component>raid</hadoop.component>
+    <is.hadoop.component>false</is.hadoop.component>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minicluster</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-archives</artifactId>
+      <scope>provided</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+
+    <plugins>
+      <plugin>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>create-mrapp-generated-classpath</id>
+            <phase>generate-test-resources</phase>
+            <goals>
+              <goal>build-classpath</goal>
+            </goals>
+            <configuration>
+              <!--
+              This is needed to run the unit tests. It generates the required classpath
+              that is required in the env of the launch container in the mini mr/yarn cluster.
+              -->
+              <outputFile>${project.build.directory}/test-classes/mrapp-generated-classpath</outputFile>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+      <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+          </excludes>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile combine.self="override"></excludeFilterFile>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+  <profiles>
+    <profile>
+      <id>docs</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-site-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>docs</id>
+                <phase>prepare-package</phase>
+                <goals>
+                  <goal>site</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+
+    <profile>
+      <id>dist</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-assembly-plugin</artifactId>
+            <dependencies>
+              <dependency>
+                <groupId>org.apache.hadoop</groupId>
+                <artifactId>hadoop-assemblies</artifactId>
+                <version>${project.version}</version>
+              </dependency>
+            </dependencies>
+            <executions>
+              <execution>
+                <id>dist</id>
+                <phase>prepare-package</phase>
+                <goals>
+                  <goal>single</goal>
+                </goals>
+                <configuration>
+                  <finalName>${project.artifactId}-${project.version}</finalName>
+                  <appendAssemblyId>false</appendAssemblyId>
+                  <attach>false</attach>
+                  <descriptorRefs>
+                    <descriptorRef>hadoop-raid-dist</descriptorRef>
+                  </descriptorRefs>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>

+ 0 - 0
hadoop-mapreduce-project/src/contrib/raid/conf/raid.xml → hadoop-hdfs-project/hadoop-hdfs-raid/src/main/conf/raid.xml


+ 0 - 0
hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/DistributedRaidFileSystem.java → hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/DistributedRaidFileSystem.java


+ 0 - 0
hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/RaidDFSUtil.java → hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/RaidDFSUtil.java


+ 0 - 0
hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java → hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java


+ 171 - 123
hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java → hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java

@@ -34,7 +34,9 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.net.SocketOutputStream;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
@@ -56,8 +58,10 @@ public class RaidBlockSender implements java.io.Closeable {
   private DataInputStream checksumIn; // checksum datastream
   private DataChecksum checksum; // checksum stream
   private long offset; // starting position to read
+  /** Initial position to read */
+  private long initialOffset;
   private long endOffset; // ending position
-  private int bytesPerChecksum; // chunk size
+  private int chunkSize; // chunk size
   private int checksumSize; // checksum size
   private boolean corruptChecksumOk; // if need to verify checksum
   private boolean chunkOffsetOK; // if need to send chunk offset
@@ -74,6 +78,8 @@ public class RaidBlockSender implements java.io.Closeable {
    * not sure if there will be much more improvement.
    */
   private static final int MIN_BUFFER_WITH_TRANSFERTO = 64*1024;
+  private static final int TRANSFERTO_BUFFER_SIZE = Math.max(
+      HdfsConstants.IO_FILE_BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO);
   private volatile ChunkChecksum lastChunkChecksum = null;
 
   
@@ -125,12 +131,13 @@ public class RaidBlockSender implements java.io.Closeable {
        * is mostly corrupted. For now just truncate bytesPerchecksum to
        * blockLength.
        */        
-      bytesPerChecksum = checksum.getBytesPerChecksum();
-      if (bytesPerChecksum > 10*1024*1024 && bytesPerChecksum > replicaVisibleLength) {
+      int size = checksum.getBytesPerChecksum();
+      if (size > 10*1024*1024 && size > replicaVisibleLength) {
         checksum = DataChecksum.newDataChecksum(checksum.getChecksumType(),
             Math.max((int)replicaVisibleLength, 10*1024*1024));
-        bytesPerChecksum = checksum.getBytesPerChecksum();        
+        size = checksum.getBytesPerChecksum();        
       }
+      chunkSize = size;
       checksumSize = checksum.getChecksumSize();
 
       if (length < 0) {
@@ -147,12 +154,12 @@ public class RaidBlockSender implements java.io.Closeable {
         throw new IOException(msg);
       }
       
-      offset = (startOffset - (startOffset % bytesPerChecksum));
+      offset = (startOffset - (startOffset % chunkSize));
       if (length >= 0) {
         // Make sure endOffset points to end of a checksumed chunk.
         long tmpLen = startOffset + length;
-        if (tmpLen % bytesPerChecksum != 0) {
-          tmpLen += (bytesPerChecksum - tmpLen % bytesPerChecksum);
+        if (tmpLen % chunkSize != 0) {
+          tmpLen += (chunkSize - tmpLen % chunkSize);
         }
         if (tmpLen < endOffset) {
           // will use on-disk checksum here since the end is a stable chunk
@@ -162,7 +169,7 @@ public class RaidBlockSender implements java.io.Closeable {
 
       // seek to the right offsets
       if (offset > 0) {
-        long checksumSkip = (offset / bytesPerChecksum) * checksumSize;
+        long checksumSkip = (offset / chunkSize) * checksumSize;
         // note blockInStream is seeked when created below
         if (checksumSkip > 0) {
           // Should we use seek() for checksum file as well?
@@ -178,7 +185,7 @@ public class RaidBlockSender implements java.io.Closeable {
       throw ioe;
     }
   }
-
+  
   /**
    * close opened files.
    */
@@ -227,57 +234,85 @@ public class RaidBlockSender implements java.io.Closeable {
     // otherwise just return the same exception.
     return ioe;
   }
-
+  
   /**
-   * Sends upto maxChunks chunks of data.
-   * 
-   * When blockInPosition is >= 0, assumes 'out' is a 
-   * {@link SocketOutputStream} and tries 
-   * {@link SocketOutputStream#transferToFully(FileChannel, long, int)} to
-   * send data (and updates blockInPosition).
+   * @param datalen Length of data 
+   * @return number of chunks for data of given size
    */
-  private int sendChunks(ByteBuffer pkt, int maxChunks, OutputStream out) 
-                         throws IOException {
-    // Sends multiple chunks in one packet with a single write().
-
-    int len = (int) Math.min(endOffset - offset,
-                             (((long) bytesPerChecksum) * ((long) maxChunks)));
-    int numChunks = (len + bytesPerChecksum - 1)/bytesPerChecksum;
-    int packetLen = len + numChunks*checksumSize + 4;
-    boolean lastDataPacket = offset + len == endOffset && len > 0;
+  private int numberOfChunks(long datalen) {
+    return (int) ((datalen + chunkSize - 1)/chunkSize);
+  }
+  
+  /**
+   * Write packet header into {@code pkt}
+   */
+  private void writePacketHeader(ByteBuffer pkt, int dataLen, int packetLen) {
     pkt.clear();
-
-
-    PacketHeader header = new PacketHeader(
-      packetLen, offset, seqno, (len == 0), len);
+    PacketHeader header = new PacketHeader(packetLen, offset, seqno,
+        (dataLen == 0), dataLen, false);
     header.putInBuffer(pkt);
+  }
+  
+  /**
+   * Read checksum into given buffer
+   * @param buf buffer to read the checksum into
+   * @param checksumOffset offset at which to write the checksum into buf
+   * @param checksumLen length of checksum to write
+   * @throws IOException on error
+   */
+  private void readChecksum(byte[] buf, final int checksumOffset,
+      final int checksumLen) throws IOException {
+    if (checksumSize <= 0 && checksumIn == null) {
+      return;
+    }
+    try {
+      checksumIn.readFully(buf, checksumOffset, checksumLen);
+    } catch (IOException e) {
+      LOG.warn(" Could not read or failed to veirfy checksum for data"
+          + " at offset " + offset + " for block " + block, e);
+      IOUtils.closeStream(checksumIn);
+      checksumIn = null;
+      if (corruptChecksumOk) {
+        if (checksumOffset < checksumLen) {
+          // Just fill the array with zeros.
+          Arrays.fill(buf, checksumOffset, checksumLen, (byte) 0);
+        }
+      } else {
+        throw e;
+      }
+    }
+  }
+  
+  /**
+   * Sends a packet with up to maxChunks chunks of data.
+   * 
+   * @param pkt buffer used for writing packet data
+   * @param maxChunks maximum number of chunks to send
+   * @param out stream to send data to
+   * @param transferTo use transferTo to send data
+   * @param throttler used for throttling data transfer bandwidth
+   */
+  private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out,
+      boolean transferTo, DataTransferThrottler throttler) throws IOException {
+    int dataLen = (int) Math.min(endOffset - offset,
+                             (chunkSize * (long) maxChunks));
+    
+    int numChunks = numberOfChunks(dataLen); // Number of chunks to be sent in the packet
+    int checksumDataLen = numChunks * checksumSize;
+    int packetLen = dataLen + checksumDataLen + 4;
+    boolean lastDataPacket = offset + dataLen == endOffset && dataLen > 0;
+
+    writePacketHeader(pkt, dataLen, packetLen);
 
     int checksumOff = pkt.position();
-    int checksumLen = numChunks * checksumSize;
     byte[] buf = pkt.array();
     
     if (checksumSize > 0 && checksumIn != null) {
-      try {
-        checksumIn.readFully(buf, checksumOff, checksumLen);
-      } catch (IOException e) {
-        LOG.warn(" Could not read or failed to veirfy checksum for data" +
-                 " at offset " + offset + " for block " + block + " got : "
-                 + StringUtils.stringifyException(e));
-        IOUtils.closeStream(checksumIn);
-        checksumIn = null;
-        if (corruptChecksumOk) {
-          if (checksumOff < checksumLen) {
-            // Just fill the array with zeros.
-            Arrays.fill(buf, checksumOff, checksumLen, (byte) 0);
-          }
-        } else {
-          throw e;
-        }
-      }
+      readChecksum(buf, checksumOff, checksumDataLen);
 
       // write in progress that we need to use to get last checksum
       if (lastDataPacket && lastChunkChecksum != null) {
-        int start = checksumOff + checksumLen - checksumSize;
+        int start = checksumOff + checksumDataLen - checksumSize;
         byte[] updatedChecksum = lastChunkChecksum.getChecksum();
         
         if (updatedChecksum != null) {
@@ -286,61 +321,85 @@ public class RaidBlockSender implements java.io.Closeable {
       }
     }
     
-    int dataOff = checksumOff + checksumLen;
-    
-    if (blockInPosition < 0) {
-      //normal transfer
-      IOUtils.readFully(blockIn, buf, dataOff, len);
+    int dataOff = checksumOff + checksumDataLen;
+    if (!transferTo) { // normal transfer
+      IOUtils.readFully(blockIn, buf, dataOff, dataLen);
 
       if (verifyChecksum) {
-        int dOff = dataOff;
-        int cOff = checksumOff;
-        int dLeft = len;
-
-        for (int i=0; i<numChunks; i++) {
-          checksum.reset();
-          int dLen = Math.min(dLeft, bytesPerChecksum);
-          checksum.update(buf, dOff, dLen);
-          if (!checksum.compare(buf, cOff)) {
-            long failedPos = offset + len -dLeft;
-            throw new ChecksumException("Checksum failed at " + 
-                                        failedPos, failedPos);
-          }
-          dLeft -= dLen;
-          dOff += dLen;
-          cOff += checksumSize;
-        }
+        verifyChecksum(buf, dataOff, dataLen, numChunks, checksumOff);
       }
-      //writing is done below (mainly to handle IOException)
     }
     
     try {
-      if (blockInPosition >= 0) {
-        //use transferTo(). Checks on out and blockIn are already done. 
-
+      if (transferTo) {
         SocketOutputStream sockOut = (SocketOutputStream)out;
-        //first write the packet
-        sockOut.write(buf, 0, dataOff);
+        sockOut.write(buf, 0, dataOff); // First write checksum
+        
         // no need to flush. since we know out is not a buffered stream. 
-
         sockOut.transferToFully(((FileInputStream)blockIn).getChannel(), 
-                                blockInPosition, len);
-
-        blockInPosition += len;
-      } else {
+                                blockInPosition, dataLen);
+        blockInPosition += dataLen;
+      } else { 
         // normal transfer
-        out.write(buf, 0, dataOff + len);
+        out.write(buf, 0, dataOff + dataLen);
       }
-      
     } catch (IOException e) {
-      /* exception while writing to the client (well, with transferTo(),
-       * it could also be while reading from the local file).
+      /* Exception while writing to the client. Connection closure from
+       * the other end is mostly the case and we do not care much about
+       * it. But other things can go wrong, especially in transferTo(),
+       * which we do not want to ignore.
+       *
+       * The message parsing below should not be considered as a good
+       * coding example. NEVER do it to drive a program logic. NEVER.
+       * It was done here because the NIO throws an IOException for EPIPE.
        */
+      String ioem = e.getMessage();
+      if (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
+        LOG.error("BlockSender.sendChunks() exception: ", e);
+      }
       throw ioeToSocketException(e);
     }
 
-    return len;
+    if (throttler != null) { // rebalancing so throttle
+      throttler.throttle(packetLen);
+    }
+
+    return dataLen;
   }
+  
+  /**
+   * Compute checksum for chunks and verify the checksum that is read from
+   * the metadata file is correct.
+   * 
+   * @param buf buffer that has checksum and data
+   * @param dataOffset position where data is written in the buf
+   * @param datalen length of data
+   * @param numChunks number of chunks corresponding to data
+   * @param checksumOffset offset where checksum is written in the buf
+   * @throws ChecksumException on failed checksum verification
+   */
+  public void verifyChecksum(final byte[] buf, final int dataOffset,
+      final int datalen, final int numChunks, final int checksumOffset)
+      throws ChecksumException {
+    int dOff = dataOffset;
+    int cOff = checksumOffset;
+    int dLeft = datalen;
+
+    for (int i = 0; i < numChunks; i++) {
+      checksum.reset();
+      int dLen = Math.min(dLeft, chunkSize);
+      checksum.update(buf, dOff, dLen);
+      if (!checksum.compare(buf, cOff)) {
+        long failedPos = offset + datalen - dLeft;
+        throw new ChecksumException("Checksum failed at " + failedPos,
+            failedPos);
+      }
+      dLeft -= dLen;
+      dOff += dLen;
+      cOff += checksumSize;
+    }
+  }
+
 
   /**
    * sendBlock() is used to read block and its metadata and stream the data to
@@ -356,79 +415,61 @@ public class RaidBlockSender implements java.io.Closeable {
    */
   public long sendBlock(DataOutputStream out, OutputStream baseStream)
       throws IOException {
-    if( out == null ) {
+    if (out == null) {
       throw new IOException( "out stream is null" );
     }
-
-    long initialOffset = offset;
+    initialOffset = offset;
     long totalRead = 0;
     OutputStream streamForSendChunks = out;
     
     final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
     try {
-      try {
-        checksum.writeHeader(out);
-        if ( chunkOffsetOK ) {
-          out.writeLong( offset );
-        }
-        out.flush();
-      } catch (IOException e) { //socket error
-        throw ioeToSocketException(e);
-      }
-      
       int maxChunksPerPacket;
       int pktSize = PacketHeader.PKT_HEADER_LEN;
-      
-      if (transferToAllowed && !verifyChecksum && 
-          baseStream instanceof SocketOutputStream && 
-          blockIn instanceof FileInputStream) {
-        
+      boolean transferTo = transferToAllowed && !verifyChecksum
+          && baseStream instanceof SocketOutputStream
+          && blockIn instanceof FileInputStream;
+      if (transferTo) {
         FileChannel fileChannel = ((FileInputStream)blockIn).getChannel();
-        
-        // blockInPosition also indicates sendChunks() uses transferTo.
         blockInPosition = fileChannel.position();
         streamForSendChunks = baseStream;
+        maxChunksPerPacket = numberOfChunks(TRANSFERTO_BUFFER_SIZE);
         
-        // assure a mininum buffer size.
-        maxChunksPerPacket = (Math.max(HdfsConstants.IO_FILE_BUFFER_SIZE, 
-                                       MIN_BUFFER_WITH_TRANSFERTO)
-                              + bytesPerChecksum - 1)/bytesPerChecksum;
-        
-        // allocate smaller buffer while using transferTo(). 
+        // Smaller packet size to only hold checksum when doing transferTo
         pktSize += checksumSize * maxChunksPerPacket;
       } else {
         maxChunksPerPacket = Math.max(1,
-            (HdfsConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1)/bytesPerChecksum);
-        pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
+            numberOfChunks(HdfsConstants.IO_FILE_BUFFER_SIZE));
+        // Packet size includes both checksum and data
+        pktSize += (chunkSize + checksumSize) * maxChunksPerPacket;
       }
 
       ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);
 
       while (endOffset > offset) {
-        long len = sendChunks(pktBuf, maxChunksPerPacket, 
-                              streamForSendChunks);
+        long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks,
+            transferTo, null);
         offset += len;
-        totalRead += len + ((len + bytesPerChecksum - 1)/bytesPerChecksum*
-                            checksumSize);
+        totalRead += len + (numberOfChunks(len) * checksumSize);
         seqno++;
       }
       try {
         // send an empty packet to mark the end of the block
-        sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks);        
+        sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
+            null);
         out.flush();
       } catch (IOException e) { //socket error
         throw ioeToSocketException(e);
       }
+      blockReadFully = true;
     } finally {
       if (clientTraceFmt != null) {
         final long endTime = System.nanoTime();
-        ClientTraceLog.info(String.format(clientTraceFmt, totalRead, initialOffset, endTime - startTime));
+        ClientTraceLog.info(String.format(clientTraceFmt, totalRead,
+            initialOffset, endTime - startTime));
       }
       close();
     }
-
-    blockReadFully = initialOffset == 0 && offset >= replicaVisibleLength;
-
     return totalRead;
   }
   
@@ -440,6 +481,13 @@ public class RaidBlockSender implements java.io.Closeable {
     public InputStream createStream(long offset) throws IOException; 
   }
   
+  /**
+   * @return the checksum type that will be used with this block transfer.
+   */
+  public DataChecksum getChecksum() {
+    return checksum;
+  }
+  
   private static class BlockInputStreamFactory implements InputStreamFactory {
     private final ExtendedBlock block;
     private final FsDatasetSpi<?> data;
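
The refactoring above concentrates the packet geometry in numberOfChunks() and sendPacket(): each packet carries ceil(dataLen / chunkSize) checksums of checksumSize bytes, the data itself, and a fixed 4 bytes, exactly as computed in sendPacket(). A standalone sketch of that arithmetic with illustrative values (the real chunkSize and checksumSize come from the block's DataChecksum):

    public class PacketSizeSketch {
      // Mirrors numberOfChunks() above: ceil(datalen / chunkSize).
      static int numberOfChunks(long datalen, int chunkSize) {
        return (int) ((datalen + chunkSize - 1) / chunkSize);
      }

      public static void main(String[] args) {
        int chunkSize = 512;      // bytes of data covered by one checksum
        int checksumSize = 4;     // e.g. CRC32
        int maxChunks = 128;      // chunks per packet
        long remaining = 100000;  // endOffset - offset
        int dataLen = (int) Math.min(remaining, (long) chunkSize * maxChunks);  // 65536
        int numChunks = numberOfChunks(dataLen, chunkSize);                     // 128
        int packetLen = dataLen + numChunks * checksumSize + 4;                 // 66052
        System.out.println("dataLen=" + dataLen + ", packetLen=" + packetLen);
      }
    }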

Some files were not shown because the diff is too large