
Merge trunk into QJM branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3077@1367365 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 13 years ago
parent
commit
e1dff3df99
100 changed files with 6150 additions and 795 deletions
  1. 2 2
      hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
  2. 32 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  3. 3 6
      hadoop-common-project/hadoop-common/src/main/bin/hadoop
  4. 25 2
      hadoop-common-project/hadoop-common/src/main/conf/ssl-client.xml.example
  5. 24 2
      hadoop-common-project/hadoop-common/src/main/conf/ssl-server.xml.example
  6. 19 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  7. 15 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  8. 0 17
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
  9. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
  10. 7 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
  11. 4 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touchz.java
  12. 5 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  13. 23 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
  14. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  15. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
  16. 9 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketOutputStream.java
  17. 241 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
  18. 67 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/KeyStoresFactory.java
  19. 204 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
  20. 237 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
  21. 585 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java
  22. 8 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java
  23. 48 1
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  24. 141 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
  25. 6 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
  26. 3 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
  27. 36 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java
  28. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
  29. 270 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
  30. 175 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
  31. 164 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
  32. 48 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
  33. 25 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java
  34. 5 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/dev-support/findbugsExcludeFile.xml
  35. 58 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
  36. 158 125
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
  37. 226 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator.java
  38. 1 1
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSPseudoAuthenticator.java
  39. 148 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSUtils.java
  40. 6 3
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
  41. 255 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandler.java
  42. 1 1
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
  43. 5 1
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
  44. 57 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenIdentifier.java
  45. 76 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManager.java
  46. 49 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManagerException.java
  47. 231 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/DelegationTokenManagerService.java
  48. 65 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
  49. 29 2
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
  50. 1 1
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/webapp/WEB-INF/web.xml
  51. 14 4
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
  52. 2 14
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java
  53. 11 17
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
  54. 310 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java
  55. 111 18
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
  56. 291 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java
  57. 83 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestDelegationTokenManagerService.java
  58. 138 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/KerberosTestUtils.java
  59. 1 3
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java
  60. 22 3
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java
  61. 28 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/krb5.conf
  62. 36 3
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  63. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
  64. 2 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/faultinject_framework.xml
  65. 6 12
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java
  66. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  67. 8 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  68. 12 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  69. 11 18
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
  70. 9 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
  71. 66 68
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
  72. 7 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
  73. 68 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
  74. 12 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
  75. 9 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  76. 7 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  77. 33 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
  78. 212 120
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  79. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
  80. 18 11
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
  81. 28 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
  82. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
  83. 9 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
  84. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt
  85. 506 116
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.c
  86. 66 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.h
  87. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_context_handle.h
  88. 23 14
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_dfs.c
  89. 3 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_file_handle.h
  90. 10 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_chmod.c
  91. 12 16
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_chown.c
  92. 2 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_flush.c
  93. 16 12
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_getattr.c
  94. 17 11
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_mkdir.c
  95. 48 17
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c
  96. 5 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_read.c
  97. 16 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_readdir.c
  98. 2 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_release.c
  99. 16 13
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rename.c
  100. 26 20
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rmdir.c

+ 2 - 2
hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml

@@ -111,9 +111,9 @@
       <outputDirectory>/share/doc/hadoop/${hadoop.component}</outputDirectory>
     </fileSet>
     <fileSet>
-      <directory>${basedir}/src/main/native</directory>
+      <directory>${basedir}/src/main/native/libhdfs</directory>
       <includes>
-        <include>*.h</include>
+        <include>hdfs.h</include>
       </includes>
       <outputDirectory>/include</outputDirectory>
     </fileSet>

+ 32 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -88,6 +88,9 @@ Trunk (unreleased changes)
     HADOOP-8523. test-patch.sh doesn't validate patches before building
     (Jack Dintruff via jeagles)

+    HADOOP-8624. ProtobufRpcEngine should log all RPCs if TRACE logging is
+    enabled (todd)
+
   BUG FIXES

     HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
@@ -181,6 +184,9 @@ Trunk (unreleased changes)
     HADOOP-8593. Add missed @Override annotations in Metric/Metrics2 package.
     (Brandon Li via suresh)

+    HADOOP-8623. hadoop jar command should respect HADOOP_OPTS.
+    (Steven Willis via suresh)
+
   OPTIMIZATIONS

     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -268,6 +274,9 @@ Branch-2 ( Unreleased changes )
     serializer or deserializer isn't available
     (Madhukara Phatak via harsh)

+    HADOOP-8609. IPC server logs a useless message when shutting down socket.
+    (Jon Zuanich via atm)
+
   BUG FIXES

     HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
@@ -358,6 +367,10 @@ Branch-2 ( Unreleased changes )
     HADOOP-8537. Fix TFile tests to pass even when native zlib support is not
     compiled. (todd)

+    HADOOP-8626. Typo in default setting for
+    hadoop.security.group.mapping.ldap.search.filter.user. (Jonathan Natkins
+    via atm)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS

     HADOOP-8220. ZKFailoverController doesn't handle failure to become active
@@ -835,6 +848,25 @@ Release 0.23.3 - UNRELEASED
     HADOOP-8599. Non empty response from FileSystem.getFileBlockLocations when
     asking for data beyond the end of file. (Andrey Klochkov via todd)

+    HADOOP-8606. FileSystem.get may return the wrong filesystem (Daryn Sharp
+    via bobby)
+
+    HADOOP-8551. fs -mkdir creates parent directories without the -p option
+    (John George via bobby)
+
+    HADOOP-8613. AbstractDelegationTokenIdentifier#getUser() should set token
+    auth type. (daryn)
+
+    HADOOP-8627. FS deleteOnExit may delete the wrong path (daryn via bobby)
+
+    HADOOP-8634. Ensure FileSystem#close doesn't squawk for deleteOnExit paths
+    (daryn via bobby)
+
+    HADOOP-8550. hadoop fs -touchz automatically created parent directories
+    (John George via bobby)
+
+    HADOOP-8635. Cannot cancel paths registered deleteOnExit (daryn via bobby)
+
 Release 0.23.2 - UNRELEASED

   INCOMPATIBLE CHANGES

+ 3 - 6
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -96,33 +96,30 @@ case $COMMAND in
     # the core commands
     if [ "$COMMAND" = "fs" ] ; then
       CLASS=org.apache.hadoop.fs.FsShell
-      HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
     elif [ "$COMMAND" = "version" ] ; then
       CLASS=org.apache.hadoop.util.VersionInfo
-      HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
     elif [ "$COMMAND" = "jar" ] ; then
       CLASS=org.apache.hadoop.util.RunJar
     elif [ "$COMMAND" = "distcp" ] ; then
       CLASS=org.apache.hadoop.tools.DistCp
       CLASSPATH=${CLASSPATH}:${TOOL_PATH}
-      HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
     elif [ "$COMMAND" = "daemonlog" ] ; then
       CLASS=org.apache.hadoop.log.LogLevel
-      HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
     elif [ "$COMMAND" = "archive" ] ; then
       CLASS=org.apache.hadoop.tools.HadoopArchives
       CLASSPATH=${CLASSPATH}:${TOOL_PATH}
-      HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
     elif [[ "$COMMAND" = -*  ]] ; then
         # class and package names cannot begin with a -
         echo "Error: No command named \`$COMMAND' was found. Perhaps you meant \`hadoop ${COMMAND#-}'"
         exit 1
     else
-      HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
       CLASS=$COMMAND
     fi
     shift

+    # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+    HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+
     #make sure security appender is turned off
     HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"


+ 25 - 2
hadoop-common-project/hadoop-common/src/main/conf/ssl-client.xml.example

@@ -1,6 +1,21 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at

+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
 <configuration>

 <property>
@@ -21,7 +36,15 @@
 <property>
   <name>ssl.client.truststore.type</name>
   <value>jks</value>
-  <description>Optional. Default value is "jks".
+  <description>Optional. The keystore file format, default value is "jks".
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.truststore.reload.interval</name>
+  <value>10000</value>
+  <description>Truststore reload check interval, in milliseconds.
+  Default value is 10000 (10 seconds).
   </description>
 </property>

@@ -50,7 +73,7 @@
 <property>
   <name>ssl.client.keystore.type</name>
   <value>jks</value>
-  <description>Optional. Default value is "jks".
+  <description>Optional. The keystore file format, default value is "jks".
   </description>
 </property>


+ 24 - 2
hadoop-common-project/hadoop-common/src/main/conf/ssl-server.xml.example

@@ -1,6 +1,21 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at

+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
 <configuration>

 <property>
@@ -20,10 +35,17 @@
 <property>
   <name>ssl.server.truststore.type</name>
   <value>jks</value>
-  <description>Optional. Default value is "jks".
+  <description>Optional. The keystore file format, default value is "jks".
   </description>
 </property>

+<property>
+  <name>ssl.server.truststore.reload.interval</name>
+  <value>10000</value>
+  <description>Truststore reload check interval, in milliseconds.
+  Default value is 10000 (10 seconds).
+</property>
+
 <property>
   <name>ssl.server.keystore.location</name>
   <value></value>
@@ -48,7 +70,7 @@
 <property>
   <name>ssl.server.keystore.type</name>
   <value>jks</value>
-  <description>Optional. Default value is "jks".
+  <description>Optional. The keystore file format, default value is "jks".
   </description>
 </property>


+ 19 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -899,6 +899,25 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     }
     return Integer.parseInt(valueString);
   }
+  
+  /**
+   * Get the value of the <code>name</code> property as a set of comma-delimited
+   * <code>int</code> values.
+   * 
+   * If no such property exists, an empty array is returned.
+   * 
+   * @param name property name
+   * @return property value interpreted as an array of comma-delimited
+   *         <code>int</code> values
+   */
+  public int[] getInts(String name) {
+    String[] strings = getTrimmedStrings(name);
+    int[] ints = new int[strings.length];
+    for (int i = 0; i < strings.length; i++) {
+      ints[i] = Integer.parseInt(strings[i]);
+    }
+    return ints;
+  }

   /** 
    * Set the value of the <code>name</code> property to an <code>int</code>.
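
For context, a minimal usage sketch of the getInts accessor added above; the property name used here is made up for illustration only:

    // Sketch only: "io.sample.weights" is an illustrative property name, not part of this patch.
    Configuration conf = new Configuration();
    conf.set("io.sample.weights", "1, 2, 3");
    int[] weights = conf.getInts("io.sample.weights");  // {1, 2, 3}; a missing property yields an empty array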

+ 15 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -280,11 +280,11 @@ public abstract class FileSystem extends Configured implements Closeable {
     String scheme = uri.getScheme();
     String authority = uri.getAuthority();

-    if (scheme == null) {                       // no scheme: use default FS
+    if (scheme == null && authority == null) {     // use default FS
       return get(conf);
     }

-    if (authority == null) {                       // no authority
+    if (scheme != null && authority == null) {     // no authority
       URI defaultUri = getDefaultUri(conf);
       if (scheme.equals(defaultUri.getScheme())    // if scheme matches default
           && defaultUri.getAuthority() != null) {  // & default has authority
@@ -1214,6 +1214,16 @@
     }
     return true;
   }
+  
+  /**
+   * Cancel the deletion of the path when the FileSystem is closed
+   * @param f the path to cancel deletion
+   */
+  public boolean cancelDeleteOnExit(Path f) {
+    synchronized (deleteOnExit) {
+      return deleteOnExit.remove(f);
+    }
+  }

   /**
    * Delete all files that were marked as delete-on-exit. This recursively
@@ -1224,7 +1234,9 @@
       for (Iterator<Path> iter = deleteOnExit.iterator(); iter.hasNext();) {
         Path path = iter.next();
         try {
-          delete(path, true);
+          if (exists(path)) {
+            delete(path, true);
+          }
         }
         catch (IOException e) {
           LOG.info("Ignoring failure to deleteOnExit for path " + path);
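
A short sketch of how the new cancelDeleteOnExit hook pairs with the existing deleteOnExit call; the path is illustrative:

    // Sketch only: "/tmp/scratch" is an illustrative path.
    FileSystem fs = FileSystem.get(conf);
    Path scratch = new Path("/tmp/scratch");
    fs.deleteOnExit(scratch);                            // schedule cleanup when the FS is closed
    boolean cancelled = fs.cancelDeleteOnExit(scratch);  // true: the registration was removed
    fs.close();                                          // scratch is no longer deleted here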

+ 0 - 17
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -191,23 +191,6 @@ public class FilterFileSystem extends FileSystem {
     return fs.delete(f, recursive);
   }
   
-  /**
-   * Mark a path to be deleted when FileSystem is closed.
-   * When the JVM shuts down,
-   * all FileSystem objects will be closed automatically.
-   * Then,
-   * the marked path will be deleted as a result of closing the FileSystem.
-   *
-   * The path has to exist in the file system.
-   * 
-   * @param f the path to delete.
-   * @return  true if deleteOnExit is successful, otherwise false.
-   * @throws IOException
-   */
-  public boolean deleteOnExit(Path f) throws IOException {
-    return fs.deleteOnExit(f);
-  }    
-
   /** List files in a directory. */
   public FileStatus[] listStatus(Path f) throws IOException {
     return fs.listStatus(f);

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java

@@ -139,7 +139,7 @@ public class Path implements Comparable {
    * Construct a path from a URI
    */
   public Path(URI aUri) {
-    uri = aUri;
+    uri = aUri.normalize();
   }
   
   /** Construct a Path from components. */
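
Because the constructor now normalizes the URI, redundant "." and resolvable ".." segments collapse at construction time; a small illustration with made-up values:

    // Illustration only: values are made up; new URI(...) throws URISyntaxException.
    Path p = new Path(new URI("hdfs://nn1/user/./todd/../data"));
    // p.toUri().getPath() is "/user/data" after normalization.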

+ 7 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java

@@ -23,9 +23,11 @@ import java.util.LinkedList;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.shell.PathExceptions.PathExistsException;
 import org.apache.hadoop.fs.shell.PathExceptions.PathIOException;
 import org.apache.hadoop.fs.shell.PathExceptions.PathIsNotDirectoryException;
+import org.apache.hadoop.fs.shell.PathExceptions.PathNotFoundException;

 /**
  * Create the given dir
@@ -66,7 +68,11 @@ class Mkdir extends FsCommand {

   @Override
   protected void processNonexistentPath(PathData item) throws IOException {
-    // TODO: should use createParents to control intermediate dir creation 
+    // check if parent exists. this is complicated because getParent(a/b/c/) returns a/b/c, but
+    // we want a/b
+    if (!item.fs.exists(new Path(item.path.toString()).getParent()) && !createParents) {
+      throw new PathNotFoundException(item.toString());
+    }
     if (!item.fs.mkdirs(item.path)) {
       throw new PathIOException(item.toString());
     }

+ 4 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touchz.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.shell.PathExceptions.PathIOException;
 import org.apache.hadoop.fs.shell.PathExceptions.PathIsDirectoryException;
+import org.apache.hadoop.fs.shell.PathExceptions.PathNotFoundException;

 /**
  * Unix touch like commands 
@@ -70,6 +71,9 @@ class Touch extends FsCommand {

     @Override
     protected void processNonexistentPath(PathData item) throws IOException {
+      if (!item.parentExists()) {
+        throw new PathNotFoundException(item.toString());
+      }
       touchz(item);
     }


+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -1399,5 +1399,10 @@ public class Client {
       result = PRIME * result + ((ticket == null) ? 0 : ticket.hashCode());
       return result;
     }
+    
+    @Override
+    public String toString() {
+      return serverPrincipal + "@" + address;
+    }
   }  
 }

+ 23 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -51,13 +51,14 @@ import com.google.protobuf.BlockingService;
 import com.google.protobuf.Descriptors.MethodDescriptor;
 import com.google.protobuf.Message;
 import com.google.protobuf.ServiceException;
+import com.google.protobuf.TextFormat;

 /**
  * RPC Engine for for protobuf based RPCs.
  */
 @InterfaceStability.Evolving
 public class ProtobufRpcEngine implements RpcEngine {
-  private static final Log LOG = LogFactory.getLog(ProtobufRpcEngine.class);
+  public static final Log LOG = LogFactory.getLog(ProtobufRpcEngine.class);
   
   static { // Register the rpcRequest deserializer for WritableRpcEngine 
     org.apache.hadoop.ipc.Server.registerProtocolEngine(
@@ -191,16 +192,29 @@ public class ProtobufRpcEngine implements RpcEngine {

       HadoopRpcRequestProto rpcRequest = constructRpcRequest(method, args);
       RpcResponseWritable val = null;
+      
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(Thread.currentThread().getId() + ": Call -> " +
+            remoteId + ": " + method.getName() +
+            " {" + TextFormat.shortDebugString((Message) args[1]) + "}");
+      }
       try {
         val = (RpcResponseWritable) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
             new RpcRequestWritable(rpcRequest), remoteId);
+
       } catch (Throwable e) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(Thread.currentThread().getId() + ": Exception <- " +
+              remoteId + ": " + method.getName() +
+                " {" + e + "}");
+        }
+
         throw new ServiceException(e);
       }

       if (LOG.isDebugEnabled()) {
         long callTime = Time.now() - startTime;
-        LOG.debug("Call: " + method.getName() + " " + callTime);
+        LOG.debug("Call: " + method.getName() + " took " + callTime + "ms");
       }
       
       Message prototype = null;
@@ -213,6 +227,13 @@ public class ProtobufRpcEngine implements RpcEngine {
       try {
         returnMessage = prototype.newBuilderForType()
             .mergeFrom(val.responseMessage).build();
+
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(Thread.currentThread().getId() + ": Response <- " +
+              remoteId + ": " + method.getName() +
+                " {" + TextFormat.shortDebugString(returnMessage) + "}");
+        }
+
       } catch (Throwable e) {
         throw new ServiceException(e);
       }
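
With LOG now public, a test or tool can switch the engine to TRACE programmatically. A hedged sketch, assuming commons-logging is backed by log4j as in the usual Hadoop test setup:

    // Sketch only: assumes the log4j binding for commons-logging.
    ((org.apache.commons.logging.impl.Log4JLogger) ProtobufRpcEngine.LOG)
        .getLogger().setLevel(org.apache.log4j.Level.TRACE);
    // Calls, responses, and exceptions are then logged via TextFormat.shortDebugString.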

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -1643,7 +1643,7 @@ public abstract class Server {
       if (!channel.isOpen())
         return;
       try {socket.shutdownOutput();} catch(Exception e) {
-        LOG.warn("Ignoring socket shutdown exception");
+        LOG.debug("Ignoring socket shutdown exception", e);
       }
       if (channel.isOpen()) {
         try {channel.close();} catch(Exception e) {}

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java

@@ -45,7 +45,8 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceStability.Evolving
 public class MutableQuantiles extends MutableMetric {

-  static final Quantile[] quantiles = { new Quantile(0.50, 0.050),
+  @VisibleForTesting
+  public static final Quantile[] quantiles = { new Quantile(0.50, 0.050),
       new Quantile(0.75, 0.025), new Quantile(0.90, 0.010),
       new Quantile(0.95, 0.005), new Quantile(0.99, 0.001) };

@@ -90,8 +91,7 @@ public class MutableQuantiles extends MutableMetric {
         "Number of %s for %s with %ds interval", lsName, desc, interval));
     // Construct the MetricsInfos for the quantiles, converting to percentiles
     quantileInfos = new MetricsInfo[quantiles.length];
-    String nameTemplate = ucName + "%dthPercentile" + interval + "sInterval"
-        + uvName;
+    String nameTemplate = ucName + "%dthPercentile" + uvName;
     String descTemplate = "%d percentile " + lvName + " with " + interval
         + " second interval for " + desc;
     for (int i = 0; i < quantiles.length; i++) {

+ 9 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketOutputStream.java

@@ -31,8 +31,8 @@ import java.nio.channels.WritableByteChannel;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.metrics2.lib.MutableRate;
-import org.apache.hadoop.util.Progressable;

 /**
  * This implements an output stream that can have a timeout while writing.
@@ -179,9 +179,9 @@ public class SocketOutputStream extends OutputStream
    * @param fileCh FileChannel to transfer data from.
    * @param position position within the channel where the transfer begins
    * @param count number of bytes to transfer.
-   * @param waitForWritableTime updated by the nanoseconds spent waiting for 
-   * the socket to become writable
-   * @param transferTime updated by the nanoseconds spent transferring data
+   * @param waitForWritableTime nanoseconds spent waiting for the socket 
+   *        to become writable
+   * @param transferTime nanoseconds spent transferring data
    * 
    * @throws EOFException 
    *         If end of input file is reached before requested number of 
@@ -195,8 +195,8 @@ public class SocketOutputStream extends OutputStream
    *         {@link FileChannel#transferTo(long, long, WritableByteChannel)}. 
    */
   public void transferToFully(FileChannel fileCh, long position, int count,
-      MutableRate waitForWritableTime,
-      MutableRate transferToTime) throws IOException {
+      LongWritable waitForWritableTime,
+      LongWritable transferToTime) throws IOException {
     long waitTime = 0;
     long transferTime = 0;
     while (count > 0) {
@@ -236,12 +236,12 @@ public class SocketOutputStream extends OutputStream
       waitTime += wait - start;
       transferTime += transfer - wait;
     }
-
+    
     if (waitForWritableTime != null) {
-      waitForWritableTime.add(waitTime);
+      waitForWritableTime.set(waitTime);
     }
     if (transferToTime != null) {
-      transferToTime.add(transferTime);
+      transferToTime.set(transferTime);
     }
   }


+ 241 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java

@@ -0,0 +1,241 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.security.ssl;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+
+import javax.net.ssl.KeyManager;
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.TrustManager;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.GeneralSecurityException;
+import java.security.KeyStore;
+import java.text.MessageFormat;
+
+/**
+ * {@link KeyStoresFactory} implementation that reads the certificates from
+ * keystore files.
+ * <p/>
+ * if the trust certificates keystore file changes, the {@link TrustManager}
+ * is refreshed with the new trust certificate entries (using a
+ * {@link ReloadingX509TrustManager} trustmanager).
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class FileBasedKeyStoresFactory implements KeyStoresFactory {
+
+  private static final Log LOG =
+    LogFactory.getLog(FileBasedKeyStoresFactory.class);
+
+  public static final String SSL_KEYSTORE_LOCATION_TPL_KEY =
+    "ssl.{0}.keystore.location";
+  public static final String SSL_KEYSTORE_PASSWORD_TPL_KEY =
+    "ssl.{0}.keystore.password";
+  public static final String SSL_KEYSTORE_TYPE_TPL_KEY =
+    "ssl.{0}.keystore.type";
+
+  public static final String SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY =
+    "ssl.{0}.truststore.reload.interval";
+  public static final String SSL_TRUSTSTORE_LOCATION_TPL_KEY =
+    "ssl.{0}.truststore.location";
+  public static final String SSL_TRUSTSTORE_PASSWORD_TPL_KEY =
+    "ssl.{0}.truststore.password";
+  public static final String SSL_TRUSTSTORE_TYPE_TPL_KEY =
+    "ssl.{0}.truststore.type";
+
+  /**
+   * Default format of the keystore files.
+   */
+  public static final String DEFAULT_KEYSTORE_TYPE = "jks";
+
+  /**
+   * Reload interval in milliseconds.
+   */
+  public static final int DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL = 10000;
+
+  private Configuration conf;
+  private KeyManager[] keyManagers;
+  private TrustManager[] trustManagers;
+  private ReloadingX509TrustManager trustManager;
+
+  /**
+   * Resolves a property name to its client/server version if applicable.
+   * <p/>
+   * NOTE: This method is public for testing purposes.
+   *
+   * @param mode client/server mode.
+   * @param template property name template.
+   * @return the resolved property name.
+   */
+  @VisibleForTesting
+  public static String resolvePropertyName(SSLFactory.Mode mode,
+                                           String template) {
+    return MessageFormat.format(template, mode.toString().toLowerCase());
+  }
+
+  /**
+   * Sets the configuration for the factory.
+   *
+   * @param conf the configuration for the factory.
+   */
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  /**
+   * Returns the configuration of the factory.
+   *
+   * @return the configuration of the factory.
+   */
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  /**
+   * Initializes the keystores of the factory.
+   *
+   * @param mode if the keystores are to be used in client or server mode.
+   * @throws IOException thrown if the keystores could not be initialized due
+   * to an IO error.
+   * @throws GeneralSecurityException thrown if the keystores could not be
+   * initialized due to a security error.
+   */
+  public void init(SSLFactory.Mode mode)
+    throws IOException, GeneralSecurityException {
+
+    boolean requireClientCert =
+      conf.getBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, true);
+
+    // certificate store
+    String keystoreType =
+      conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY),
+               DEFAULT_KEYSTORE_TYPE);
+    KeyStore keystore = KeyStore.getInstance(keystoreType);
+    String keystorePassword = null;
+    if (requireClientCert || mode == SSLFactory.Mode.SERVER) {
+      String locationProperty =
+        resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY);
+      String keystoreLocation = conf.get(locationProperty, "");
+      if (keystoreLocation.isEmpty()) {
+        throw new GeneralSecurityException("The property '" + locationProperty +
+          "' has not been set in the ssl configuration file.");
+      }
+      String passwordProperty =
+        resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY);
+      keystorePassword = conf.get(passwordProperty, "");
+      if (keystorePassword.isEmpty()) {
+        throw new GeneralSecurityException("The property '" + passwordProperty +
+          "' has not been set in the ssl configuration file.");
+      }
+      LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation);
+
+      InputStream is = new FileInputStream(keystoreLocation);
+      try {
+        keystore.load(is, keystorePassword.toCharArray());
+      } finally {
+        is.close();
+      }
+      LOG.info(mode.toString() + " Loaded KeyStore: " + keystoreLocation);
+    } else {
+      keystore.load(null, null);
+    }
+    KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance("SunX509");
+    keyMgrFactory.init(keystore, (keystorePassword != null) ?
+                                 keystorePassword.toCharArray() : null);
+    keyManagers = keyMgrFactory.getKeyManagers();
+
+    //trust store
+    String truststoreType =
+      conf.get(resolvePropertyName(mode, SSL_TRUSTSTORE_TYPE_TPL_KEY),
+               DEFAULT_KEYSTORE_TYPE);
+
+    String locationProperty =
+      resolvePropertyName(mode, SSL_TRUSTSTORE_LOCATION_TPL_KEY);
+    String truststoreLocation = conf.get(locationProperty, "");
+    if (truststoreLocation.isEmpty()) {
+      throw new GeneralSecurityException("The property '" + locationProperty +
+        "' has not been set in the ssl configuration file.");
+    }
+
+    String passwordProperty = resolvePropertyName(mode,
+                                                  SSL_TRUSTSTORE_PASSWORD_TPL_KEY);
+    String truststorePassword = conf.get(passwordProperty, "");
+    if (truststorePassword.isEmpty()) {
+      throw new GeneralSecurityException("The property '" + passwordProperty +
+        "' has not been set in the ssl configuration file.");
+    }
+    long truststoreReloadInterval =
+      conf.getLong(
+        resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY),
+        DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL);
+
+    LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation);
+
+    trustManager = new ReloadingX509TrustManager(truststoreType,
+                                                 truststoreLocation,
+                                                 truststorePassword,
+                                                 truststoreReloadInterval);
+    trustManager.init();
+    LOG.info(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
+
+    trustManagers = new TrustManager[]{trustManager};
+  }
+
+  /**
+   * Releases any resources being used.
+   */
+  @Override
+  public synchronized void destroy() {
+    if (trustManager != null) {
+      trustManager.destroy();
+      trustManager = null;
+      keyManagers = null;
+      trustManagers = null;
+    }
+  }
+
+  /**
+   * Returns the keymanagers for owned certificates.
+   *
+   * @return the keymanagers for owned certificates.
+   */
+  @Override
+  public KeyManager[] getKeyManagers() {
+    return keyManagers;
+  }
+
+  /**
+   * Returns the trustmanagers for trusted certificates.
+   *
+   * @return the trustmanagers for trusted certificates.
+   */
+  @Override
+  public TrustManager[] getTrustManagers() {
+    return trustManagers;
+  }
+
+}
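
A hedged sketch of driving the factory from configuration in client mode; the truststore location and password below are placeholders:

    // Sketch only: file location and password are placeholders.
    Configuration sslConf = new Configuration(false);
    sslConf.set("ssl.client.truststore.location", "/etc/hadoop/client.truststore.jks");
    sslConf.set("ssl.client.truststore.password", "trustpass");
    sslConf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, false);

    KeyStoresFactory factory = new FileBasedKeyStoresFactory();
    factory.setConf(sslConf);
    factory.init(SSLFactory.Mode.CLIENT);   // loads the truststore and starts the reloader
    // factory.getKeyManagers() / factory.getTrustManagers() can then seed an SSLContext.
    factory.destroy();                      // stops the reloader thread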

+ 67 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/KeyStoresFactory.java

@@ -0,0 +1,67 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.security.ssl;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configurable;
+
+import javax.net.ssl.KeyManager;
+import javax.net.ssl.TrustManager;
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+
+/**
+ * Interface that gives access to {@link KeyManager} and {@link TrustManager}
+ * implementations.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface KeyStoresFactory extends Configurable {
+
+  /**
+   * Initializes the keystores of the factory.
+   *
+   * @param mode if the keystores are to be used in client or server mode.
+   * @throws IOException thrown if the keystores could not be initialized due
+   * to an IO error.
+   * @throws GeneralSecurityException thrown if the keystores could not be
+   * initialized due to an security error.
+   */
+  public void init(SSLFactory.Mode mode) throws IOException, GeneralSecurityException;
+
+  /**
+   * Releases any resources being used.
+   */
+  public void destroy();
+
+  /**
+   * Returns the keymanagers for owned certificates.
+   *
+   * @return the keymanagers for owned certificates.
+   */
+  public KeyManager[] getKeyManagers();
+
+  /**
+   * Returns the trustmanagers for trusted certificates.
+   *
+   * @return the trustmanagers for trusted certificates.
+   */
+  public TrustManager[] getTrustManagers();
+
+}

+ 204 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java

@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security.ssl;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.TrustManagerFactory;
+import javax.net.ssl.X509TrustManager;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+import java.security.KeyStore;
+import java.security.cert.CertificateException;
+import java.security.cert.X509Certificate;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * A {@link TrustManager} implementation that reloads its configuration when
+ * the truststore file on disk changes.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class ReloadingX509TrustManager
+  implements X509TrustManager, Runnable {
+
+  private static final Log LOG =
+    LogFactory.getLog(ReloadingX509TrustManager.class);
+
+  private String type;
+  private File file;
+  private String password;
+  private long lastLoaded;
+  private long reloadInterval;
+  private AtomicReference<X509TrustManager> trustManagerRef;
+
+  private volatile boolean running;
+  private Thread reloader;
+
+  /**
+   * Creates a reloadable trustmanager. The trustmanager reloads itself
+   * if the underlying trustore file has changed.
+   *
+   * @param type type of truststore file, typically 'jks'.
+   * @param location local path to the truststore file.
+   * @param password password of the truststore file.
+   * @param reloadInterval interval to check if the truststore file has
+   * changed, in milliseconds.
+   * @throws IOException thrown if the truststore could not be initialized due
+   * to an IO error.
+   * @throws GeneralSecurityException thrown if the truststore could not be
+   * initialized due to a security error.
+   */
+  public ReloadingX509TrustManager(String type, String location,
+                                   String password, long reloadInterval)
+    throws IOException, GeneralSecurityException {
+    this.type = type;
+    file = new File(location);
+    this.password = password;
+    trustManagerRef = new AtomicReference<X509TrustManager>();
+    trustManagerRef.set(loadTrustManager());
+    this.reloadInterval = reloadInterval;
+  }
+
+  /**
+   * Starts the reloader thread.
+   */
+  public void init() {
+    reloader = new Thread(this, "Truststore reloader thread");
+    reloader.setDaemon(true);
+    running =  true;
+    reloader.start();
+  }
+
+  /**
+   * Stops the reloader thread.
+   */
+  public void destroy() {
+    running = false;
+    reloader.interrupt();
+  }
+
+  /**
+   * Returns the reload check interval.
+   *
+   * @return the reload check interval, in milliseconds.
+   */
+  public long getReloadInterval() {
+    return reloadInterval;
+  }
+
+  @Override
+  public void checkClientTrusted(X509Certificate[] chain, String authType)
+    throws CertificateException {
+    X509TrustManager tm = trustManagerRef.get();
+    if (tm != null) {
+      tm.checkClientTrusted(chain, authType);
+    } else {
+      throw new CertificateException("Unknown client chain certificate: " +
+                                     chain[0].toString());
+    }
+  }
+
+  @Override
+  public void checkServerTrusted(X509Certificate[] chain, String authType)
+    throws CertificateException {
+    X509TrustManager tm = trustManagerRef.get();
+    if (tm != null) {
+      tm.checkServerTrusted(chain, authType);
+    } else {
+      throw new CertificateException("Unknown server chain certificate: " +
+                                     chain[0].toString());
+    }
+  }
+
+  private static final X509Certificate[] EMPTY = new X509Certificate[0];
+  @Override
+  public X509Certificate[] getAcceptedIssuers() {
+    X509Certificate[] issuers = EMPTY;
+    X509TrustManager tm = trustManagerRef.get();
+    if (tm != null) {
+      issuers = tm.getAcceptedIssuers();
+    }
+    return issuers;
+  }
+
+  boolean needsReload() {
+    boolean reload = true;
+    if (file.exists()) {
+      if (file.lastModified() == lastLoaded) {
+        reload = false;
+      }
+    } else {
+      lastLoaded = 0;
+    }
+    return reload;
+  }
+
+  X509TrustManager loadTrustManager()
+  throws IOException, GeneralSecurityException {
+    X509TrustManager trustManager = null;
+    KeyStore ks = KeyStore.getInstance(type);
+    lastLoaded = file.lastModified();
+    FileInputStream in = new FileInputStream(file);
+    try {
+      ks.load(in, password.toCharArray());
+      LOG.debug("Loaded truststore '" + file + "'");
+    } finally {
+      in.close();
+    }
+
+    TrustManagerFactory trustManagerFactory =
+      TrustManagerFactory.getInstance("SunX509");
+    trustManagerFactory.init(ks);
+    TrustManager[] trustManagers = trustManagerFactory.getTrustManagers();
+    for (TrustManager trustManager1 : trustManagers) {
+      if (trustManager1 instanceof X509TrustManager) {
+        trustManager = (X509TrustManager) trustManager1;
+        break;
+      }
+    }
+    return trustManager;
+  }
+
+  @Override
+  public void run() {
+    while (running) {
+      try {
+        Thread.sleep(reloadInterval);
+      } catch (InterruptedException e) {
+        //NOP
+      }
+      if (running && needsReload()) {
+        try {
+          trustManagerRef.set(loadTrustManager());
+        } catch (Exception ex) {
+          LOG.warn("Could not load truststore (keep using existing one) : " +
+                   ex.toString(), ex);
+        }
+      }
+    }
+  }
+
+}
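
A minimal usage sketch for the class above (illustrative only; the truststore path, password and 10 second interval are hypothetical):

import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import org.apache.hadoop.security.ssl.ReloadingX509TrustManager;

public class ReloadingTrustManagerExample {
  public static void main(String[] args) throws Exception {
    ReloadingX509TrustManager tm =
        new ReloadingX509TrustManager("jks", "/tmp/truststore.jks", "changeit", 10000);
    tm.init();                                      // start the daemon reloader thread
    try {
      SSLContext ctx = SSLContext.getInstance("TLS");
      ctx.init(null, new TrustManager[]{tm}, null); // plug the reloading manager into a context
      // sockets created from ctx.getSocketFactory() now pick up truststore changes on the fly
    } finally {
      tm.destroy();                                 // stop the reloader thread
    }
  }
}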

+ 237 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java

@@ -0,0 +1,237 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.security.ssl;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLEngine;
+import javax.net.ssl.SSLServerSocketFactory;
+import javax.net.ssl.SSLSocketFactory;
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+
+/**
+ * Factory that creates SSLEngine and SSLSocketFactory instances using
+ * Hadoop configuration information.
+ * <p/>
+ * This SSLFactory uses a {@link ReloadingX509TrustManager} instance,
+ * which reloads its trusted certificates if the truststore file changes.
+ * <p/>
+ * This factory is used to configure HTTPS in Hadoop HTTP based endpoints, both
+ * client and server.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class SSLFactory {
+
+  @InterfaceAudience.Private
+  public static enum Mode { CLIENT, SERVER }
+
+  public static final String SSL_REQUIRE_CLIENT_CERT_KEY =
+    "hadoop.ssl.require.client.cert";
+  public static final String SSL_HOSTNAME_VERIFIER_KEY =
+    "hadoop.ssl.hostname.verifier";
+  public static final String SSL_CLIENT_CONF_KEY =
+    "hadoop.ssl.client.conf";
+  public static final String SSL_SERVER_CONF_KEY =
+    "hadoop.ssl.server.conf";
+
+  public static final boolean DEFAULT_SSL_REQUIRE_CLIENT_CERT = false;
+
+  public static final String KEYSTORES_FACTORY_CLASS_KEY =
+    "hadoop.ssl.keystores.factory.class";
+
+  private Configuration conf;
+  private Mode mode;
+  private boolean requireClientCert;
+  private SSLContext context;
+  private HostnameVerifier hostnameVerifier;
+  private KeyStoresFactory keystoresFactory;
+
+  /**
+   * Creates an SSLFactory.
+   *
+   * @param mode SSLFactory mode, client or server.
+   * @param conf Hadoop configuration from where the SSLFactory configuration
+   * will be read.
+   */
+  public SSLFactory(Mode mode, Configuration conf) {
+    this.conf = conf;
+    if (mode == null) {
+      throw new IllegalArgumentException("mode cannot be NULL");
+    }
+    this.mode = mode;
+    requireClientCert = conf.getBoolean(SSL_REQUIRE_CLIENT_CERT_KEY,
+                                        DEFAULT_SSL_REQUIRE_CLIENT_CERT);
+    Configuration sslConf = readSSLConfiguration(mode);
+
+    Class<? extends KeyStoresFactory> klass
+      = conf.getClass(KEYSTORES_FACTORY_CLASS_KEY,
+                      FileBasedKeyStoresFactory.class, KeyStoresFactory.class);
+    keystoresFactory = ReflectionUtils.newInstance(klass, sslConf);
+  }
+
+  private Configuration readSSLConfiguration(Mode mode) {
+    Configuration sslConf = new Configuration(false);
+    sslConf.setBoolean(SSL_REQUIRE_CLIENT_CERT_KEY, requireClientCert);
+    String sslConfResource;
+    if (mode == Mode.CLIENT) {
+      sslConfResource = conf.get(SSL_CLIENT_CONF_KEY, "ssl-client.xml");
+    } else {
+      sslConfResource = conf.get(SSL_SERVER_CONF_KEY, "ssl-server.xml");
+    }
+    sslConf.addResource(sslConfResource);
+    return sslConf;
+  }
+
+  /**
+   * Initializes the factory.
+   *
+   * @throws  GeneralSecurityException thrown if an SSL initialization error
+   * happened.
+   * @throws IOException thrown if an IO error happened while reading the SSL
+   * configuration.
+   */
+  public void init() throws GeneralSecurityException, IOException {
+    keystoresFactory.init(mode);
+    context = SSLContext.getInstance("TLS");
+    context.init(keystoresFactory.getKeyManagers(),
+                 keystoresFactory.getTrustManagers(), null);
+
+    hostnameVerifier = getHostnameVerifier(conf);
+  }
+
+  private HostnameVerifier getHostnameVerifier(Configuration conf)
+    throws GeneralSecurityException, IOException {
+    HostnameVerifier hostnameVerifier;
+    String verifier =
+      conf.get(SSL_HOSTNAME_VERIFIER_KEY, "DEFAULT").trim().toUpperCase();
+    if (verifier.equals("DEFAULT")) {
+      hostnameVerifier = SSLHostnameVerifier.DEFAULT;
+    } else if (verifier.equals("DEFAULT_AND_LOCALHOST")) {
+      hostnameVerifier = SSLHostnameVerifier.DEFAULT_AND_LOCALHOST;
+    } else if (verifier.equals("STRICT")) {
+      hostnameVerifier = SSLHostnameVerifier.STRICT;
+    } else if (verifier.equals("STRICT_IE6")) {
+      hostnameVerifier = SSLHostnameVerifier.STRICT_IE6;
+    } else if (verifier.equals("ALLOW_ALL")) {
+      hostnameVerifier = SSLHostnameVerifier.ALLOW_ALL;
+    } else {
+      throw new GeneralSecurityException("Invalid hostname verifier: " +
+                                         verifier);
+    }
+    return hostnameVerifier;
+  }
+
+  /**
+   * Releases any resources being used.
+   */
+  public void destroy() {
+    keystoresFactory.destroy();
+  }
+
+  /**
+   * Returns the SSLFactory KeyStoresFactory instance.
+   *
+   * @return the SSLFactory KeyStoresFactory instance.
+   */
+  public KeyStoresFactory getKeystoresFactory() {
+    return keystoresFactory;
+  }
+
+  /**
+   * Returns a configured SSLEngine.
+   *
+   * @return the configured SSLEngine.
+   * @throws GeneralSecurityException thrown if the SSL engine could not
+   * be initialized.
+   * @throws IOException thrown if an IO error occurred while loading
+   * the server keystore.
+   */
+  public SSLEngine createSSLEngine()
+    throws GeneralSecurityException, IOException {
+    SSLEngine sslEngine = context.createSSLEngine();
+    if (mode == Mode.CLIENT) {
+      sslEngine.setUseClientMode(true);
+    } else {
+      sslEngine.setUseClientMode(false);
+      sslEngine.setNeedClientAuth(requireClientCert);
+    }
+    return sslEngine;
+  }
+
+  /**
+   * Returns a configured SSLServerSocketFactory.
+   *
+   * @return the configured SSLSocketFactory.
+   * @throws GeneralSecurityException thrown if the SSLSocketFactory could not
+   * be initialized.
+   * @throws IOException thrown if an IO error occurred while loading
+   * the server keystore.
+   */
+  public SSLServerSocketFactory createSSLServerSocketFactory()
+    throws GeneralSecurityException, IOException {
+    if (mode != Mode.SERVER) {
+      throw new IllegalStateException("Factory is in CLIENT mode");
+    }
+    return context.getServerSocketFactory();
+  }
+
+  /**
+   * Returns a configured SSLSocketFactory.
+   *
+   * @return the configured SSLSocketFactory.
+   * @throws GeneralSecurityException thrown if the SSLSocketFactory could not
+   * be initialized.
+   * @throws IOException thrown if an IO error occurred while loading
+   * the server keystore.
+   */
+  public SSLSocketFactory createSSLSocketFactory()
+    throws GeneralSecurityException, IOException {
+    if (mode != Mode.CLIENT) {
+      throw new IllegalStateException("Factory is in SERVER mode");
+    }
+    return context.getSocketFactory();
+  }
+
+  /**
+   * Returns the hostname verifier to be used in HttpsURLConnections.
+   *
+   * @return the hostname verifier.
+   */
+  public HostnameVerifier getHostnameVerifier() {
+    if (mode != Mode.CLIENT) {
+      throw new IllegalStateException("Factory is in SERVER mode");
+    }
+    return hostnameVerifier;
+  }
+
+  /**
+   * Returns whether client certificates are required.
+   *
+   * @return true if client certificates are required, false otherwise.
+   */
+  public boolean isClientCertRequired() {
+    return requireClientCert;
+  }
+
+}
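
To make the client-side flow described in the class Javadoc concrete, here is a sketch (not part of the patch; the HTTPS URL is hypothetical) of wiring an SSLFactory into an HttpsURLConnection:

import java.net.URL;
import javax.net.ssl.HttpsURLConnection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ssl.SSLFactory;

public class SSLFactoryClientExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();  // picks up ssl-client.xml via hadoop.ssl.client.conf
    SSLFactory factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
    factory.init();
    try {
      HttpsURLConnection conn = (HttpsURLConnection)
          new URL("https://namenode.example.com:50470/").openConnection();
      conn.setSSLSocketFactory(factory.createSSLSocketFactory());
      conn.setHostnameVerifier(factory.getHostnameVerifier());
      System.out.println("HTTP status: " + conn.getResponseCode());
    } finally {
      factory.destroy();
    }
  }
}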

+ 585 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java

@@ -0,0 +1,585 @@
+/*
+ * $HeadURL$
+ * $Revision$
+ * $Date$
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ *
+ * This software consists of voluntary contributions made by many
+ * individuals on behalf of the Apache Software Foundation.  For more
+ * information on the Apache Software Foundation, please see
+ * <http://www.apache.org/>.
+ *
+ */
+
+package org.apache.hadoop.security.ssl;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.cert.Certificate;
+import java.security.cert.CertificateParsingException;
+import java.security.cert.X509Certificate;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.StringTokenizer;
+import java.util.TreeSet;
+
+import javax.net.ssl.SSLException;
+import javax.net.ssl.SSLPeerUnverifiedException;
+import javax.net.ssl.SSLSession;
+import javax.net.ssl.SSLSocket;
+
+/**
+ ************************************************************************
+ * Copied from the not-yet-commons-ssl project at
+ * http://juliusdavies.ca/commons-ssl/
+ * This project is not yet in Apache, but it is Apache 2.0 licensed.
+ ************************************************************************
+ * Interface for checking if a hostname matches the names stored inside the
+ * server's X.509 certificate.  Correctly implements
+ * javax.net.ssl.HostnameVerifier, but that interface is not recommended.
+ * Instead we added several check() methods that take SSLSocket,
+ * or X509Certificate, or ultimately (they all end up calling this one),
+ * String.  (It's easier to supply JUnit with Strings instead of mock
+ * SSLSession objects!)
+ * <p/>
+ * Our check() methods throw exceptions if the name is
+ * invalid, whereas javax.net.ssl.HostnameVerifier just returns true/false.
+ * <p/>
+ * We provide the HostnameVerifier.DEFAULT, HostnameVerifier.STRICT, and
+ * HostnameVerifier.ALLOW_ALL implementations.  We also provide the more
+ * specialized HostnameVerifier.DEFAULT_AND_LOCALHOST, as well as
+ * HostnameVerifier.STRICT_IE6.  But feel free to define your own
+ * implementations!
+ * <p/>
+ * Inspired by Sebastian Hauer's original StrictSSLProtocolSocketFactory in the
+ * HttpClient "contrib" repository.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface SSLHostnameVerifier extends javax.net.ssl.HostnameVerifier {
+
+    boolean verify(String host, SSLSession session);
+
+    void check(String host, SSLSocket ssl) throws IOException;
+
+    void check(String host, X509Certificate cert) throws SSLException;
+
+    void check(String host, String[] cns, String[] subjectAlts)
+        throws SSLException;
+
+    void check(String[] hosts, SSLSocket ssl) throws IOException;
+
+    void check(String[] hosts, X509Certificate cert) throws SSLException;
+
+
+    /**
+     * Checks to see if the supplied hostname matches any of the supplied CNs
+     * or "DNS" Subject-Alts.  Most implementations only look at the first CN,
+     * and ignore any additional CNs.  Most implementations do look at all of
+     * the "DNS" Subject-Alts. The CNs or Subject-Alts may contain wildcards
+     * according to RFC 2818.
+     *
+     * @param hosts       The array of hostnames to verify.
+     * @param cns         CN fields, in order, as extracted from the X.509
+     *                    certificate.
+     * @param subjectAlts Subject-Alt fields of type 2 ("DNS"), as extracted
+     *                    from the X.509 certificate.
+     * @throws SSLException If verification failed.
+     */
+    void check(String[] hosts, String[] cns, String[] subjectAlts)
+        throws SSLException;
+
+
+    /**
+     * The DEFAULT HostnameVerifier works the same way as Curl and Firefox.
+     * <p/>
+     * The hostname must match either the first CN, or any of the subject-alts.
+     * A wildcard can occur in the CN, and in any of the subject-alts.
+     * <p/>
+     * The only difference between DEFAULT and STRICT is that a wildcard (such
+     * as "*.foo.com") with DEFAULT matches all subdomains, including
+     * "a.b.foo.com".
+     */
+    public final static SSLHostnameVerifier DEFAULT =
+        new AbstractVerifier() {
+            public final void check(final String[] hosts, final String[] cns,
+                                    final String[] subjectAlts)
+                throws SSLException {
+                check(hosts, cns, subjectAlts, false, false);
+            }
+
+            public final String toString() { return "DEFAULT"; }
+        };
+
+
+    /**
+     * The DEFAULT_AND_LOCALHOST HostnameVerifier works like the DEFAULT
+     * one with one additional relaxation:  a host of "localhost",
+     * "localhost.localdomain", "127.0.0.1", "::1" will always pass, no matter
+     * what is in the server's certificate.
+     */
+    public final static SSLHostnameVerifier DEFAULT_AND_LOCALHOST =
+        new AbstractVerifier() {
+            public final void check(final String[] hosts, final String[] cns,
+                                    final String[] subjectAlts)
+                throws SSLException {
+                if (isLocalhost(hosts[0])) {
+                    return;
+                }
+                check(hosts, cns, subjectAlts, false, false);
+            }
+
+            public final String toString() { return "DEFAULT_AND_LOCALHOST"; }
+        };
+
+    /**
+     * The STRICT HostnameVerifier works the same way as java.net.URL in Sun
+     * Java 1.4, Sun Java 5, Sun Java 6.  It's also pretty close to IE6.
+     * This implementation appears to be compliant with RFC 2818 for dealing
+     * with wildcards.
+     * <p/>
+     * The hostname must match either the first CN, or any of the subject-alts.
+     * A wildcard can occur in the CN, and in any of the subject-alts.  The
+     * one divergence from IE6 is how we only check the first CN.  IE6 allows
+     * a match against any of the CNs present.  We decided to follow in
+     * Sun Java 1.4's footsteps and only check the first CN.
+     * <p/>
+     * A wildcard such as "*.foo.com" matches only subdomains in the same
+     * level, for example "a.foo.com".  It does not match deeper subdomains
+     * such as "a.b.foo.com".
+     */
+    public final static SSLHostnameVerifier STRICT =
+        new AbstractVerifier() {
+            public final void check(final String[] host, final String[] cns,
+                                    final String[] subjectAlts)
+                throws SSLException {
+                check(host, cns, subjectAlts, false, true);
+            }
+
+            public final String toString() { return "STRICT"; }
+        };
+
+    /**
+     * The STRICT_IE6 HostnameVerifier works just like the STRICT one with one
+     * minor variation:  the hostname can match against any of the CN's in the
+     * server's certificate, not just the first one.  This behaviour is
+     * identical to IE6's behaviour.
+     */
+    public final static SSLHostnameVerifier STRICT_IE6 =
+        new AbstractVerifier() {
+            public final void check(final String[] host, final String[] cns,
+                                    final String[] subjectAlts)
+                throws SSLException {
+                check(host, cns, subjectAlts, true, true);
+            }
+
+            public final String toString() { return "STRICT_IE6"; }
+        };
+
+    /**
+     * The ALLOW_ALL HostnameVerifier essentially turns hostname verification
+     * off.  This implementation is a no-op and never throws an SSLException.
+     */
+    public final static SSLHostnameVerifier ALLOW_ALL =
+        new AbstractVerifier() {
+            public final void check(final String[] host, final String[] cns,
+                                    final String[] subjectAlts) {
+                // Allow everything - so never blowup.
+            }
+
+            public final String toString() { return "ALLOW_ALL"; }
+        };
+
+    @SuppressWarnings("unchecked")
+    abstract class AbstractVerifier implements SSLHostnameVerifier {
+
+        /**
+         * This contains a list of 2nd-level domains that aren't allowed to
+         * have wildcards when combined with country-codes.
+         * For example: [*.co.uk].
+         * <p/>
+         * The [*.co.uk] problem is an interesting one.  Should we just hope
+         * that CA's would never foolishly allow such a certificate to happen?
+         * Looks like we're the only implementation guarding against this.
+         * Firefox, Curl, Sun Java 1.4, 5, 6 don't bother with this check.
+         */
+        private final static String[] BAD_COUNTRY_2LDS =
+            {"ac", "co", "com", "ed", "edu", "go", "gouv", "gov", "info",
+                "lg", "ne", "net", "or", "org"};
+
+        private final static String[] LOCALHOSTS = {"::1", "127.0.0.1",
+            "localhost",
+            "localhost.localdomain"};
+
+
+        static {
+            // Just in case developer forgot to manually sort the array.  :-)
+            Arrays.sort(BAD_COUNTRY_2LDS);
+            Arrays.sort(LOCALHOSTS);
+        }
+
+        protected AbstractVerifier() {}
+
+        /**
+         * The javax.net.ssl.HostnameVerifier contract.
+         *
+         * @param host    'hostname' we used to create our socket
+         * @param session SSLSession with the remote server
+         * @return true if the host matched the one in the certificate.
+         */
+        public boolean verify(String host, SSLSession session) {
+            try {
+                Certificate[] certs = session.getPeerCertificates();
+                X509Certificate x509 = (X509Certificate) certs[0];
+                check(new String[]{host}, x509);
+                return true;
+            }
+            catch (SSLException e) {
+                return false;
+            }
+        }
+
+        public void check(String host, SSLSocket ssl) throws IOException {
+            check(new String[]{host}, ssl);
+        }
+
+        public void check(String host, X509Certificate cert)
+            throws SSLException {
+            check(new String[]{host}, cert);
+        }
+
+        public void check(String host, String[] cns, String[] subjectAlts)
+            throws SSLException {
+            check(new String[]{host}, cns, subjectAlts);
+        }
+
+        public void check(String host[], SSLSocket ssl)
+            throws IOException {
+            if (host == null) {
+                throw new NullPointerException("host to verify is null");
+            }
+
+            SSLSession session = ssl.getSession();
+            if (session == null) {
+                // In our experience this only happens under IBM 1.4.x when
+                // spurious (unrelated) certificates show up in the server's
+                // chain.  Hopefully this will unearth the real problem:
+                InputStream in = ssl.getInputStream();
+                in.available();
+                /*
+                  If you're looking at the 2 lines of code above because
+                  you're running into a problem, you probably have two
+                  options:
+
+                    #1.  Clean up the certificate chain that your server
+                         is presenting (e.g. edit "/etc/apache2/server.crt"
+                         or wherever it is your server's certificate chain
+                         is defined).
+
+                                               OR
+
+                    #2.   Upgrade to an IBM 1.5.x or greater JVM, or switch
+                          to a non-IBM JVM.
+                */
+
+                // If ssl.getInputStream().available() didn't cause an
+                // exception, maybe at least now the session is available?
+                session = ssl.getSession();
+                if (session == null) {
+                    // If it's still null, probably a startHandshake() will
+                    // unearth the real problem.
+                    ssl.startHandshake();
+
+                    // Okay, if we still haven't managed to cause an exception,
+                    // might as well go for the NPE.  Or maybe we're okay now?
+                    session = ssl.getSession();
+                }
+            }
+            Certificate[] certs;
+            try {
+                certs = session.getPeerCertificates();
+            } catch (SSLPeerUnverifiedException spue) {
+                InputStream in = ssl.getInputStream();
+                in.available();
+                // Didn't trigger anything interesting?  Okay, just throw
+                // original.
+                throw spue;
+            }
+            X509Certificate x509 = (X509Certificate) certs[0];
+            check(host, x509);
+        }
+
+        public void check(String[] host, X509Certificate cert)
+            throws SSLException {
+            String[] cns = Certificates.getCNs(cert);
+            String[] subjectAlts = Certificates.getDNSSubjectAlts(cert);
+            check(host, cns, subjectAlts);
+        }
+
+        public void check(final String[] hosts, final String[] cns,
+                          final String[] subjectAlts, final boolean ie6,
+                          final boolean strictWithSubDomains)
+            throws SSLException {
+            // Build up a list of the allowed hosts, for logging/debugging purposes.
+            StringBuffer buf = new StringBuffer(32);
+            buf.append('<');
+            for (int i = 0; i < hosts.length; i++) {
+                String h = hosts[i];
+                h = h != null ? h.trim().toLowerCase() : "";
+                hosts[i] = h;
+                if (i > 0) {
+                    buf.append('/');
+                }
+                buf.append(h);
+            }
+            buf.append('>');
+            String hostnames = buf.toString();
+            // Build the list of names we're going to check.  Our DEFAULT and
+            // STRICT implementations of the HostnameVerifier only use the
+            // first CN provided.  All other CNs are ignored.
+            // (Firefox, wget, curl, Sun Java 1.4, 5, 6 all work this way).
+            TreeSet names = new TreeSet();
+            if (cns != null && cns.length > 0 && cns[0] != null) {
+                names.add(cns[0]);
+                if (ie6) {
+                    for (int i = 1; i < cns.length; i++) {
+                        names.add(cns[i]);
+                    }
+                }
+            }
+            if (subjectAlts != null) {
+                for (int i = 0; i < subjectAlts.length; i++) {
+                    if (subjectAlts[i] != null) {
+                        names.add(subjectAlts[i]);
+                    }
+                }
+            }
+            if (names.isEmpty()) {
+                String msg = "Certificate for " + hosts[0] + " doesn't contain CN or DNS subjectAlt";
+                throw new SSLException(msg);
+            }
+
+            // StringBuffer for building the error message.
+            buf = new StringBuffer();
+
+            boolean match = false;
+            out:
+            for (Iterator it = names.iterator(); it.hasNext();) {
+                // Don't trim the CN, though!
+                String cn = (String) it.next();
+                cn = cn.toLowerCase();
+                // Store CN in StringBuffer in case we need to report an error.
+                buf.append(" <");
+                buf.append(cn);
+                buf.append('>');
+                if (it.hasNext()) {
+                    buf.append(" OR");
+                }
+
+                // The CN better have at least two dots if it wants wildcard
+                // action.  It also can't be [*.co.uk] or [*.co.jp] or
+                // [*.org.uk], etc...
+                boolean doWildcard = cn.startsWith("*.") &&
+                                     countDots(cn) >= 2 &&
+                                     !isIP4Address(cn) &&
+                                     acceptableCountryWildcard(cn);
+
+                for (int i = 0; i < hosts.length; i++) {
+                    final String hostName = hosts[i].trim().toLowerCase();
+                    if (doWildcard) {
+                        match = hostName.endsWith(cn.substring(1));
+                        if (match && strictWithSubDomains) {
+                            // If we're in strict mode, then [*.foo.com] is not
+                            // allowed to match [a.b.foo.com]
+                            match = countDots(hostName) == countDots(cn);
+                        }
+                    } else {
+                        match = hostName.equals(cn);
+                    }
+                    if (match) {
+                        break out;
+                    }
+                }
+            }
+            if (!match) {
+                throw new SSLException("hostname in certificate didn't match: " + hostnames + " !=" + buf);
+            }
+        }
+
+        public static boolean isIP4Address(final String cn) {
+            boolean isIP4 = true;
+            String tld = cn;
+            int x = cn.lastIndexOf('.');
+            // We only bother analyzing the characters after the final dot
+            // in the name.
+            if (x >= 0 && x + 1 < cn.length()) {
+                tld = cn.substring(x + 1);
+            }
+            for (int i = 0; i < tld.length(); i++) {
+                if (!Character.isDigit(tld.charAt(i))) {
+                    isIP4 = false;
+                    break;
+                }
+            }
+            return isIP4;
+        }
+
+        public static boolean acceptableCountryWildcard(final String cn) {
+            int cnLen = cn.length();
+            if (cnLen >= 7 && cnLen <= 9) {
+                // Look for the '.' in the 3rd-last position:
+                if (cn.charAt(cnLen - 3) == '.') {
+                    // Trim off the [*.] and the [.XX].
+                    String s = cn.substring(2, cnLen - 3);
+                    // And test against the sorted array of bad 2lds:
+                    int x = Arrays.binarySearch(BAD_COUNTRY_2LDS, s);
+                    return x < 0;
+                }
+            }
+            return true;
+        }
+
+        public static boolean isLocalhost(String host) {
+            host = host != null ? host.trim().toLowerCase() : "";
+            if (host.startsWith("::1")) {
+                int x = host.lastIndexOf('%');
+                if (x >= 0) {
+                    host = host.substring(0, x);
+                }
+            }
+            int x = Arrays.binarySearch(LOCALHOSTS, host);
+            return x >= 0;
+        }
+
+        /**
+         * Counts the number of dots "." in a string.
+         *
+         * @param s string to count dots from
+         * @return number of dots
+         */
+        public static int countDots(final String s) {
+            int count = 0;
+            for (int i = 0; i < s.length(); i++) {
+                if (s.charAt(i) == '.') {
+                    count++;
+                }
+            }
+            return count;
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    static class Certificates {
+      public static String[] getCNs(X509Certificate cert) {
+        LinkedList cnList = new LinkedList();
+        /*
+          Sebastian Hauer's original StrictSSLProtocolSocketFactory used
+          getName() and had the following comment:
+
+             Parses a X.500 distinguished name for the value of the
+             "Common Name" field.  This is done a bit sloppy right
+             now and should probably be done a bit more according to
+             <code>RFC 2253</code>.
+
+           I've noticed that toString() seems to do a better job than
+           getName() on these X500Principal objects, so I'm hoping that
+           addresses Sebastian's concern.
+
+           For example, getName() gives me this:
+           1.2.840.113549.1.9.1=#16166a756c6975736461766965734063756362632e636f6d
+
+           whereas toString() gives me this:
+           EMAILADDRESS=juliusdavies@cucbc.com
+
+           Looks like toString() even works with non-ascii domain names!
+           I tested it with "&#x82b1;&#x5b50;.co.jp" and it worked fine.
+          */
+        String subjectPrincipal = cert.getSubjectX500Principal().toString();
+        StringTokenizer st = new StringTokenizer(subjectPrincipal, ",");
+        while (st.hasMoreTokens()) {
+            String tok = st.nextToken();
+            int x = tok.indexOf("CN=");
+            if (x >= 0) {
+                cnList.add(tok.substring(x + 3));
+            }
+        }
+        if (!cnList.isEmpty()) {
+            String[] cns = new String[cnList.size()];
+            cnList.toArray(cns);
+            return cns;
+        } else {
+            return null;
+        }
+      }
+
+
+      /**
+       * Extracts the array of SubjectAlt DNS names from an X509Certificate.
+       * Returns null if there aren't any.
+       * <p/>
+       * Note:  Java doesn't appear able to extract international characters
+       * from the SubjectAlts.  It can only extract international characters
+       * from the CN field.
+       * <p/>
+       * (Or maybe the version of OpenSSL I'm using to test isn't storing the
+       * international characters correctly in the SubjectAlts?).
+       *
+       * @param cert X509Certificate
+       * @return Array of SubjectALT DNS names stored in the certificate.
+       */
+      public static String[] getDNSSubjectAlts(X509Certificate cert) {
+          LinkedList subjectAltList = new LinkedList();
+          Collection c = null;
+          try {
+              c = cert.getSubjectAlternativeNames();
+          }
+          catch (CertificateParsingException cpe) {
+              // Should probably log.debug() this?
+              cpe.printStackTrace();
+          }
+          if (c != null) {
+              Iterator it = c.iterator();
+              while (it.hasNext()) {
+                  List list = (List) it.next();
+                  int type = ((Integer) list.get(0)).intValue();
+                  // If type is 2, then we've got a dNSName
+                  if (type == 2) {
+                      String s = (String) list.get(1);
+                      subjectAltList.add(s);
+                  }
+              }
+          }
+          if (!subjectAltList.isEmpty()) {
+              String[] subjectAlts = new String[subjectAltList.size()];
+              subjectAltList.toArray(subjectAlts);
+              return subjectAlts;
+          } else {
+              return null;
+          }
+      }
+    }
+
+}
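
The DEFAULT-versus-STRICT wildcard behaviour documented above can be seen with a small sketch (not part of the patch) that drives the check(String, String[], String[]) overload directly:

import javax.net.ssl.SSLException;
import org.apache.hadoop.security.ssl.SSLHostnameVerifier;

public class HostnameVerifierExample {
  public static void main(String[] args) throws SSLException {
    String[] cns = {"*.foo.com"};
    String[] subjectAlts = null;

    // DEFAULT: a wildcard matches any depth of subdomain, so this passes.
    SSLHostnameVerifier.DEFAULT.check("a.b.foo.com", cns, subjectAlts);

    // STRICT: "*.foo.com" only matches one level, so this throws.
    try {
      SSLHostnameVerifier.STRICT.check("a.b.foo.com", cns, subjectAlts);
    } catch (SSLException expected) {
      System.out.println("STRICT rejected a.b.foo.com: " + expected.getMessage());
    }

    // Both accept a same-level match.
    SSLHostnameVerifier.DEFAULT.check("a.foo.com", cns, subjectAlts);
    SSLHostnameVerifier.STRICT.check("a.foo.com", cns, subjectAlts);
  }
}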

+ 8 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.security.HadoopKerberosName;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.TokenIdentifier;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -88,14 +89,17 @@ extends TokenIdentifier {
     if ( (owner == null) || ("".equals(owner.toString()))) {
       return null;
     }
+    final UserGroupInformation realUgi;
+    final UserGroupInformation ugi;
     if ((realUser == null) || ("".equals(realUser.toString()))
         || realUser.equals(owner)) {
-      return UserGroupInformation.createRemoteUser(owner.toString());
+      ugi = realUgi = UserGroupInformation.createRemoteUser(owner.toString());
     } else {
-      UserGroupInformation realUgi = UserGroupInformation
-          .createRemoteUser(realUser.toString());
-      return UserGroupInformation.createProxyUser(owner.toString(), realUgi);
+      realUgi = UserGroupInformation.createRemoteUser(realUser.toString());
+      ugi = UserGroupInformation.createProxyUser(owner.toString(), realUgi);
     }
+    realUgi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
+    return ugi;
   }
 
   public Text getOwner() {

+ 48 - 1
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -165,7 +165,7 @@
 
 <property>
   <name>hadoop.security.group.mapping.ldap.search.filter.user</name>
-  <value>(&amp;(objectClass=user)(sAMAccountName={0})</value>
+  <value>(&amp;(objectClass=user)(sAMAccountName={0}))</value>
   <description>
     An additional filter to use when searching for LDAP users. The default will
     usually be appropriate for Active Directory installations. If connecting to
@@ -1026,4 +1026,51 @@
   <name>hadoop.http.staticuser.user</name>
   <value>dr.who</value>
 </property>
+
+<!-- SSLFactory configuration -->
+
+<property>
+  <name>hadoop.ssl.keystores.factory.class</name>
+  <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
+  <description>
+    The keystores factory to use for retrieving certificates.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.ssl.require.client.cert</name>
+  <value>false</value>
+  <description>Whether client certificates are required</description>
+</property>
+
+<property>
+  <name>hadoop.ssl.hostname.verifier</name>
+  <value>DEFAULT</value>
+  <description>
+    The hostname verifier to provide for HttpsURLConnections.
+    Valid values are: DEFAULT, STRICT, STRICT_IE6, DEFAULT_AND_LOCALHOST and
+    ALLOW_ALL
+  </description>
+</property>
+
+<property>
+  <name>hadoop.ssl.server.conf</name>
+  <value>ssl-server.xml</value>
+  <description>
+    Resource file from which ssl server keystore information will be extracted.
+    This file is looked up in the classpath, typically it should be in Hadoop
+    conf/ directory.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.ssl.client.conf</name>
+  <value>ssl-client.xml</value>
+  <description>
+    Resource file from which ssl client keystore information will be extracted.
+    This file is looked up in the classpath, typically it should be in Hadoop
+    conf/ directory.
+  </description>
+</property>
+
 </configuration>
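
The new properties above map one-to-one onto the SSLFactory constants introduced in this patch; as a sketch (illustrative values only, not defaults), they can also be set programmatically before constructing a factory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ssl.SSLFactory;

public class SSLConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Same keys as in core-default.xml; the values here are arbitrary examples.
    conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, true);  // hadoop.ssl.require.client.cert
    conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "STRICT");       // hadoop.ssl.hostname.verifier
    conf.set(SSLFactory.SSL_SERVER_CONF_KEY, "my-ssl-server.xml");  // hadoop.ssl.server.conf (hypothetical resource)
    conf.set(SSLFactory.SSL_CLIENT_CONF_KEY, "my-ssl-client.xml");  // hadoop.ssl.client.conf (hypothetical resource)

    SSLFactory serverFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
    System.out.println("client certs required: " + serverFactory.isClientCertRequired());
  }
}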

+ 141 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java

@@ -34,8 +34,8 @@ import org.junit.Test;
 import java.security.PrivilegedExceptionAction;
 import java.util.concurrent.Semaphore;
 
-import static org.mockito.Mockito.mock;
-import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
 
 
 public class TestFileSystemCaching {
@@ -49,6 +49,65 @@ public class TestFileSystemCaching {
     assertSame(fs1, fs2);
   }
 
+  static class DefaultFs extends LocalFileSystem {
+    URI uri;
+    @Override
+    public void initialize(URI uri, Configuration conf) {
+      this.uri = uri;
+    }
+    @Override
+    public URI getUri() {
+      return uri;
+    }
+  }
+  
+  @Test
+  public void testDefaultFsUris() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.set("fs.defaultfs.impl", DefaultFs.class.getName());
+    final URI defaultUri = URI.create("defaultfs://host");
+    FileSystem.setDefaultUri(conf, defaultUri);
+    FileSystem fs = null;
+    
+    // sanity check default fs
+    final FileSystem defaultFs = FileSystem.get(conf);
+    assertEquals(defaultUri, defaultFs.getUri());
+    
+    // has scheme, no auth
+    fs = FileSystem.get(URI.create("defaultfs:/"), conf);
+    assertSame(defaultFs, fs);
+    fs = FileSystem.get(URI.create("defaultfs:///"), conf);
+    assertSame(defaultFs, fs);
+    
+    // has scheme, same auth
+    fs = FileSystem.get(URI.create("defaultfs://host"), conf);
+    assertSame(defaultFs, fs);
+    
+    // has scheme, different auth
+    fs = FileSystem.get(URI.create("defaultfs://host2"), conf);
+    assertNotSame(defaultFs, fs);
+    
+    // no scheme, no auth
+    fs = FileSystem.get(URI.create("/"), conf);
+    assertSame(defaultFs, fs);
+    
+    // no scheme, same auth
+    try {
+      fs = FileSystem.get(URI.create("//host"), conf);
+      fail("got fs with auth but no scheme");
+    } catch (Exception e) {
+      assertEquals("No FileSystem for scheme: null", e.getMessage());
+    }
+    
+    // no scheme, different auth
+    try {
+      fs = FileSystem.get(URI.create("//host2"), conf);
+      fail("got fs with auth but no scheme");
+    } catch (Exception e) {
+      assertEquals("No FileSystem for scheme: null", e.getMessage());
+    }
+  }
+  
   public static class InitializeForeverFileSystem extends LocalFileSystem {
     final static Semaphore sem = new Semaphore(0);
     public void initialize(URI uri, Configuration conf) throws IOException {
@@ -208,4 +267,84 @@ public class TestFileSystemCaching {
     });
     assertNotSame(fsA, fsA1);
   }
+  
+  @Test
+  public void testDelete() throws IOException {
+    FileSystem mockFs = mock(FileSystem.class);
+    FileSystem fs = new FilterFileSystem(mockFs);    
+    Path path = new Path("/a");
+
+    fs.delete(path, false);
+    verify(mockFs).delete(eq(path), eq(false));
+    reset(mockFs);
+    fs.delete(path, true);
+    verify(mockFs).delete(eq(path), eq(true));
+  }
+
+  @Test
+  public void testDeleteOnExit() throws IOException {
+    FileSystem mockFs = mock(FileSystem.class);
+    FileSystem fs = new FilterFileSystem(mockFs);
+    Path path = new Path("/a");
+
+    // delete on close if path does exist
+    when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
+    assertTrue(fs.deleteOnExit(path));
+    verify(mockFs).getFileStatus(eq(path));
+    reset(mockFs);
+    when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
+    fs.close();
+    verify(mockFs).getFileStatus(eq(path));
+    verify(mockFs).delete(eq(path), eq(true));
+  }
+
+  @Test
+  public void testDeleteOnExitFNF() throws IOException {
+    FileSystem mockFs = mock(FileSystem.class);
+    FileSystem fs = new FilterFileSystem(mockFs);
+    Path path = new Path("/a");
+
+    // don't delete on close if path doesn't exist
+    assertFalse(fs.deleteOnExit(path));
+    verify(mockFs).getFileStatus(eq(path));
+    reset(mockFs);
+    fs.close();
+    verify(mockFs, never()).getFileStatus(eq(path));
+    verify(mockFs, never()).delete(any(Path.class), anyBoolean());
+  }
+
+
+  @Test
+  public void testDeleteOnExitRemoved() throws IOException {
+    FileSystem mockFs = mock(FileSystem.class);
+    FileSystem fs = new FilterFileSystem(mockFs);
+    Path path = new Path("/a");
+
+    // don't delete on close if path existed, but later removed
+    when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
+    assertTrue(fs.deleteOnExit(path));
+    verify(mockFs).getFileStatus(eq(path));
+    reset(mockFs);
+    fs.close();
+    verify(mockFs).getFileStatus(eq(path));
+    verify(mockFs, never()).delete(any(Path.class), anyBoolean());
+  }
+
+  @Test
+  public void testCancelDeleteOnExit() throws IOException {
+    FileSystem mockFs = mock(FileSystem.class);
+    FileSystem fs = new FilterFileSystem(mockFs);
+    Path path = new Path("/a");
+
+    // don't delete on close if path existed, but later cancelled
+    when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
+    assertTrue(fs.deleteOnExit(path));
+    verify(mockFs).getFileStatus(eq(path));
+    assertTrue(fs.cancelDeleteOnExit(path));
+    assertFalse(fs.cancelDeleteOnExit(path)); // false because not registered
+    reset(mockFs);
+    fs.close();
+    verify(mockFs, never()).getFileStatus(any(Path.class));
+    verify(mockFs, never()).delete(any(Path.class), anyBoolean());
+  }
 }

+ 6 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java

@@ -179,7 +179,12 @@ public class TestFilterFileSystem {
     public Token<?> getDelegationToken(String renewer) throws IOException {
       return null;
     }
-
+    public boolean deleteOnExit(Path f) throws IOException {
+      return false;
+    }
+    public boolean cancelDeleteOnExit(Path f) throws IOException {
+      return false;
+    }
     public String getScheme() {
       return "dontcheck";
     }

+ 3 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java

@@ -61,7 +61,7 @@ public class TestPath extends TestCase {
     assertEquals(pathString, new Path(pathString).toString());
   }
 
-  public void testNormalize() {
+  public void testNormalize() throws URISyntaxException {
     assertEquals("", new Path(".").toString());
     assertEquals("", new Path(".").toString());
     assertEquals("..", new Path("..").toString());
     assertEquals("..", new Path("..").toString());
     assertEquals("/", new Path("/").toString());
     assertEquals("/", new Path("/").toString());
@@ -75,6 +75,8 @@ public class TestPath extends TestCase {
     assertEquals("foo", new Path("foo/").toString());
     assertEquals("foo", new Path("foo/").toString());
     assertEquals("foo", new Path("foo//").toString());
     assertEquals("foo", new Path("foo//").toString());
     assertEquals("foo/bar", new Path("foo//bar").toString());
     assertEquals("foo/bar", new Path("foo//bar").toString());
+    assertEquals("hdfs://foo/foo2/bar/baz/",
+        new Path(new URI("hdfs://foo//foo2///bar/baz///")).toString());
     if (Path.WINDOWS) {
       assertEquals("c:/a/b", new Path("c:\\a\\b").toString());
     }

+ 36 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.viewfs.ChRootedFileSystem;
@@ -33,6 +34,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import static org.mockito.Mockito.*;
 
 public class TestChRootedFileSystem {
   FileSystem fSys; // The ChRoootedFs
@@ -314,4 +316,37 @@ public class TestChRootedFileSystem {
   public void testResolvePathNonExisting() throws IOException {
       fSys.resolvePath(new Path("/nonExisting"));
   }
-}
+  
+  @Test
+  public void testDeleteOnExitPathHandling() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
+        
+    URI chrootUri = URI.create("mockfs://foo/a/b");
+    ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
+    FileSystem mockFs = ((FilterFileSystem)chrootFs.getRawFileSystem())
+        .getRawFileSystem();
+    
+    // ensure delete propagates the correct path
+    Path chrootPath = new Path("/c");
+    Path rawPath = new Path("/a/b/c");
+    chrootFs.delete(chrootPath, false);
+    verify(mockFs).delete(eq(rawPath), eq(false));
+    reset(mockFs);
+ 
+    // fake that the path exists for deleteOnExit
+    FileStatus stat = mock(FileStatus.class);
+    when(mockFs.getFileStatus(eq(rawPath))).thenReturn(stat);
+    // ensure deleteOnExit propagates the correct path
+    chrootFs.deleteOnExit(chrootPath);
+    chrootFs.close();
+    verify(mockFs).delete(eq(rawPath), eq(true));
+  }
+
+  static class MockFileSystem extends FilterFileSystem {
+    MockFileSystem() {
+      super(mock(FileSystem.class));
+    }
+    public void initialize(URI name, Configuration conf) throws IOException {}
+  }
+}

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java

@@ -150,7 +150,7 @@ public class TestMutableMetrics {
         info("FooNumOps", "Number of ops for stat with 5s interval"),
         info("FooNumOps", "Number of ops for stat with 5s interval"),
         (long) 2000);
         (long) 2000);
     Quantile[] quants = MutableQuantiles.quantiles;
     Quantile[] quants = MutableQuantiles.quantiles;
-    String name = "Foo%dthPercentile5sIntervalLatency";
+    String name = "Foo%dthPercentileLatency";
     String desc = "%d percentile latency with 5 second interval for stat";
     String desc = "%d percentile latency with 5 second interval for stat";
     for (Quantile q : quants) {
     for (Quantile q : quants) {
       int percentile = (int) (100 * q.quantile);
       int percentile = (int) (100 * q.quantile);
@@ -176,7 +176,7 @@ public class TestMutableMetrics {
         "Latency", 5);
         "Latency", 5);
 
 
     Quantile[] quants = MutableQuantiles.quantiles;
     Quantile[] quants = MutableQuantiles.quantiles;
-    String name = "Foo%dthPercentile5sIntervalLatency";
+    String name = "Foo%dthPercentileLatency";
     String desc = "%d percentile latency with 5 second interval for stat";
     String desc = "%d percentile latency with 5 second interval for stat";
 
 
     // Push values for three intervals
     // Push values for three intervals

+ 270 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java

@@ -0,0 +1,270 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security.ssl;
+
+import org.apache.hadoop.conf.Configuration;
+import sun.security.x509.AlgorithmId;
+import sun.security.x509.CertificateAlgorithmId;
+import sun.security.x509.CertificateIssuerName;
+import sun.security.x509.CertificateSerialNumber;
+import sun.security.x509.CertificateSubjectName;
+import sun.security.x509.CertificateValidity;
+import sun.security.x509.CertificateVersion;
+import sun.security.x509.CertificateX509Key;
+import sun.security.x509.X500Name;
+import sun.security.x509.X509CertImpl;
+import sun.security.x509.X509CertInfo;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.Writer;
+import java.math.BigInteger;
+import java.net.URL;
+import java.security.GeneralSecurityException;
+import java.security.Key;
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.KeyStore;
+import java.security.NoSuchAlgorithmException;
+import java.security.PrivateKey;
+import java.security.SecureRandom;
+import java.security.cert.Certificate;
+import java.security.cert.X509Certificate;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+
+public class KeyStoreTestUtil {
+
+  public static String getClasspathDir(Class klass) throws Exception {
+    String file = klass.getName();
+    file = file.replace('.', '/') + ".class";
+    URL url = Thread.currentThread().getContextClassLoader().getResource(file);
+    String baseDir = url.toURI().getPath();
+    baseDir = baseDir.substring(0, baseDir.length() - file.length() - 1);
+    return baseDir;
+  }
+
+  /**
+   * Create a self-signed X.509 Certificate.
+   * From http://bfo.com/blog/2011/03/08/odds_and_ends_creating_a_new_x_509_certificate.html.
+   *
+   * @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB"
+   * @param pair the KeyPair
+   * @param days how many days from now the Certificate is valid for
+   * @param algorithm the signing algorithm, eg "SHA1withRSA"
+   * @return the self-signed certificate
+   * @throws IOException thrown if an IO error occurred.
+   * @throws GeneralSecurityException thrown if a security error occurred.
+   */
+  public static X509Certificate generateCertificate(String dn, KeyPair pair,
+                                                    int days, String algorithm)
+    throws GeneralSecurityException, IOException {
+    PrivateKey privkey = pair.getPrivate();
+    X509CertInfo info = new X509CertInfo();
+    Date from = new Date();
+    Date to = new Date(from.getTime() + days * 86400000l);
+    CertificateValidity interval = new CertificateValidity(from, to);
+    BigInteger sn = new BigInteger(64, new SecureRandom());
+    X500Name owner = new X500Name(dn);
+
+    info.set(X509CertInfo.VALIDITY, interval);
+    info.set(X509CertInfo.SERIAL_NUMBER, new CertificateSerialNumber(sn));
+    info.set(X509CertInfo.SUBJECT, new CertificateSubjectName(owner));
+    info.set(X509CertInfo.ISSUER, new CertificateIssuerName(owner));
+    info.set(X509CertInfo.KEY, new CertificateX509Key(pair.getPublic()));
+    info
+      .set(X509CertInfo.VERSION, new CertificateVersion(CertificateVersion.V3));
+    AlgorithmId algo = new AlgorithmId(AlgorithmId.md5WithRSAEncryption_oid);
+    info.set(X509CertInfo.ALGORITHM_ID, new CertificateAlgorithmId(algo));
+
+    // Sign the cert to identify the algorithm that's used.
+    X509CertImpl cert = new X509CertImpl(info);
+    cert.sign(privkey, algorithm);
+
+    // Update the algorithm and re-sign.
+    algo = (AlgorithmId) cert.get(X509CertImpl.SIG_ALG);
+    info
+      .set(CertificateAlgorithmId.NAME + "." + CertificateAlgorithmId.ALGORITHM,
+           algo);
+    cert = new X509CertImpl(info);
+    cert.sign(privkey, algorithm);
+    return cert;
+  }
+
+  public static KeyPair generateKeyPair(String algorithm)
+    throws NoSuchAlgorithmException {
+    KeyPairGenerator keyGen = KeyPairGenerator.getInstance(algorithm);
+    keyGen.initialize(1024);
+    return keyGen.genKeyPair();
+  }
+
+  private static KeyStore createEmptyKeyStore()
+    throws GeneralSecurityException, IOException {
+    KeyStore ks = KeyStore.getInstance("JKS");
+    ks.load(null, null); // initialize
+    return ks;
+  }
+
+  private static void saveKeyStore(KeyStore ks, String filename,
+                                   String password)
+    throws GeneralSecurityException, IOException {
+    FileOutputStream out = new FileOutputStream(filename);
+    try {
+      ks.store(out, password.toCharArray());
+    } finally {
+      out.close();
+    }
+  }
+
+  public static void createKeyStore(String filename,
+                                    String password, String alias,
+                                    Key privateKey, Certificate cert)
+    throws GeneralSecurityException, IOException {
+    KeyStore ks = createEmptyKeyStore();
+    ks.setKeyEntry(alias, privateKey, password.toCharArray(),
+                   new Certificate[]{cert});
+    saveKeyStore(ks, filename, password);
+  }
+
+  public static void createTrustStore(String filename,
+                                      String password, String alias,
+                                      Certificate cert)
+    throws GeneralSecurityException, IOException {
+    KeyStore ks = createEmptyKeyStore();
+    ks.setCertificateEntry(alias, cert);
+    saveKeyStore(ks, filename, password);
+  }
+
+  public static <T extends Certificate> void createTrustStore(
+    String filename, String password, Map<String, T> certs)
+    throws GeneralSecurityException, IOException {
+    KeyStore ks = createEmptyKeyStore();
+    for (Map.Entry<String, T> cert : certs.entrySet()) {
+      ks.setCertificateEntry(cert.getKey(), cert.getValue());
+    }
+    saveKeyStore(ks, filename, password);
+  }
+
+  public static void cleanupSSLConfig(String keystoresDir, String sslConfDir)
+    throws Exception {
+    File f = new File(keystoresDir + "/clientKS.jks");
+    f.delete();
+    f = new File(keystoresDir + "/serverKS.jks");
+    f.delete();
+    f = new File(keystoresDir + "/trustKS.jks");
+    f.delete();
+    f = new File(sslConfDir + "/ssl-client.xml");
+    f.delete();
+    f = new File(sslConfDir +  "/ssl-server.xml");
+    f.delete();
+  }
+
+  public static void setupSSLConfig(String keystoresDir, String sslConfDir,
+                                    Configuration conf, boolean useClientCert)
+    throws Exception {
+    String clientKS = keystoresDir + "/clientKS.jks";
+    String clientPassword = "clientP";
+    String serverKS = keystoresDir + "/serverKS.jks";
+    String serverPassword = "serverP";
+    String trustKS = keystoresDir + "/trustKS.jks";
+    String trustPassword = "trustP";
+
+    File sslClientConfFile = new File(sslConfDir + "/ssl-client.xml");
+    File sslServerConfFile = new File(sslConfDir + "/ssl-server.xml");
+
+    Map<String, X509Certificate> certs = new HashMap<String, X509Certificate>();
+
+    if (useClientCert) {
+      KeyPair cKP = KeyStoreTestUtil.generateKeyPair("RSA");
+      X509Certificate cCert =
+        KeyStoreTestUtil.generateCertificate("CN=localhost, O=client", cKP, 30,
+                                             "SHA1withRSA");
+      KeyStoreTestUtil.createKeyStore(clientKS, clientPassword, "client",
+                                      cKP.getPrivate(), cCert);
+      certs.put("client", cCert);
+    }
+
+    KeyPair sKP = KeyStoreTestUtil.generateKeyPair("RSA");
+    X509Certificate sCert =
+      KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", sKP, 30,
+                                           "SHA1withRSA");
+    KeyStoreTestUtil.createKeyStore(serverKS, serverPassword, "server",
+                                    sKP.getPrivate(), sCert);
+    certs.put("server", sCert);
+
+    KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs);
+
+    Configuration clientSSLConf = new Configuration(false);
+    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
+      SSLFactory.Mode.CLIENT,
+      FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY), clientKS);
+    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
+      SSLFactory.Mode.CLIENT,
+      FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), clientPassword);
+    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
+      SSLFactory.Mode.CLIENT,
+      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS);
+    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
+      SSLFactory.Mode.CLIENT,
+      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), trustPassword);
+    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
+      SSLFactory.Mode.CLIENT,
+      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000");
+
+    Configuration serverSSLConf = new Configuration(false);
+    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
+      SSLFactory.Mode.SERVER,
+      FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY), serverKS);
+    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
+      SSLFactory.Mode.SERVER,
+      FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), serverPassword);
+    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
+      SSLFactory.Mode.SERVER,
+      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS);
+    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
+      SSLFactory.Mode.SERVER,
+      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), trustPassword);
+    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
+      SSLFactory.Mode.SERVER,
+      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000");
+
+    Writer writer = new FileWriter(sslClientConfFile);
+    try {
+      clientSSLConf.writeXml(writer);
+    } finally {
+      writer.close();
+    }
+
+    writer = new FileWriter(sslServerConfFile);
+    try {
+      serverSSLConf.writeXml(writer);
+    } finally {
+      writer.close();
+    }
+
+    conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "ALLOW_ALL");
+    conf.set(SSLFactory.SSL_CLIENT_CONF_KEY, sslClientConfFile.getName());
+    conf.set(SSLFactory.SSL_SERVER_CONF_KEY, sslServerConfFile.getName());
+    conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, useClientCert);
+  }
+
+}
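The utility above is only exercised indirectly by the SSL tests in this change, so a minimal consuming-test sketch may help. It relies solely on methods visible in this patch (setupSSLConfig, cleanupSSLConfig, getClasspathDir and the SSLFactory client calls used by TestSSLFactory); the test class name and directory are invented for illustration.

// Sketch only: a hypothetical test that wires KeyStoreTestUtil into an
// SSLFactory client, mirroring the way TestSSLFactory uses it below.
package org.apache.hadoop.security.ssl;

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;

public class MySSLEnabledTest {

  @Test
  public void clientCanBuildSocketFactory() throws Exception {
    String keystoresDir = new File("target/test-dir/my-ssl-test").getAbsolutePath();
    new File(keystoresDir).mkdirs();
    // ssl-client.xml/ssl-server.xml must land on the classpath, hence getClasspathDir()
    String sslConfDir = KeyStoreTestUtil.getClasspathDir(MySSLEnabledTest.class);

    Configuration conf = new Configuration();
    // generates the key/trust stores and writes ssl-client.xml / ssl-server.xml
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
    SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
    try {
      sslFactory.init();
      // the factory can now hand out a client socket factory and a hostname verifier
      sslFactory.createSSLSocketFactory();
      sslFactory.getHostnameVerifier();
    } finally {
      sslFactory.destroy();
      KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
    }
  }
}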

+ 175 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java

@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.ssl;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.security.KeyPair;
+import java.security.cert.X509Certificate;
+import java.util.HashMap;
+import java.util.Map;
+
+import static junit.framework.Assert.assertEquals;
+import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.createTrustStore;
+import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.generateCertificate;
+import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.generateKeyPair;
+
+public class TestReloadingX509TrustManager {
+
+  private static final String BASEDIR =
+    System.getProperty("test.build.data", "target/test-dir") + "/" +
+    TestReloadingX509TrustManager.class.getSimpleName();
+
+  private X509Certificate cert1;
+  private X509Certificate cert2;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    File base = new File(BASEDIR);
+    FileUtil.fullyDelete(base);
+    base.mkdirs();
+  }
+
+  @Test(expected = IOException.class)
+  public void testLoadMissingTrustStore() throws Exception {
+    String truststoreLocation = BASEDIR + "/testmissing.jks";
+
+    ReloadingX509TrustManager tm =
+      new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10);
+    try {
+      tm.init();
+    } finally {
+      tm.destroy();
+    }
+  }
+
+  @Test(expected = IOException.class)
+  public void testLoadCorruptTrustStore() throws Exception {
+    String truststoreLocation = BASEDIR + "/testcorrupt.jks";
+    OutputStream os = new FileOutputStream(truststoreLocation);
+    os.write(1);
+    os.close();
+
+    ReloadingX509TrustManager tm =
+      new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10);
+    try {
+      tm.init();
+    } finally {
+      tm.destroy();
+    }
+  }
+
+  @Test
+  public void testReload() throws Exception {
+    KeyPair kp = generateKeyPair("RSA");
+    cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA");
+    cert2 = generateCertificate("CN=Cert2", kp, 30, "SHA1withRSA");
+    String truststoreLocation = BASEDIR + "/testreload.jks";
+    createTrustStore(truststoreLocation, "password", "cert1", cert1);
+
+    ReloadingX509TrustManager tm =
+      new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10);
+    try {
+      tm.init();
+      assertEquals(1, tm.getAcceptedIssuers().length);
+
+      // Wait so that the file modification time is different
+      Thread.sleep((tm.getReloadInterval() + 1000));
+
+      // Add another cert
+      Map<String, X509Certificate> certs = new HashMap<String, X509Certificate>();
+      certs.put("cert1", cert1);
+      certs.put("cert2", cert2);
+      createTrustStore(truststoreLocation, "password", certs);
+
+      // verify the configured reload interval before waiting for the reload
+      assertEquals(10, tm.getReloadInterval());
+
+      // Wait so that the file modification time is different
+      Thread.sleep((tm.getReloadInterval() + 200));
+
+      assertEquals(2, tm.getAcceptedIssuers().length);
+    } finally {
+      tm.destroy();
+    }
+  }
+
+  @Test
+  public void testReloadMissingTrustStore() throws Exception {
+    KeyPair kp = generateKeyPair("RSA");
+    cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA");
+    cert2 = generateCertificate("CN=Cert2", kp, 30, "SHA1withRSA");
+    String truststoreLocation = BASEDIR + "/testmissing.jks";
+    createTrustStore(truststoreLocation, "password", "cert1", cert1);
+
+    ReloadingX509TrustManager tm =
+      new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10);
+    try {
+      tm.init();
+      assertEquals(1, tm.getAcceptedIssuers().length);
+      X509Certificate cert = tm.getAcceptedIssuers()[0];
+      new File(truststoreLocation).delete();
+
+      // Wait so that the file modification time is different
+      Thread.sleep((tm.getReloadInterval() + 200));
+
+      assertEquals(1, tm.getAcceptedIssuers().length);
+      assertEquals(cert, tm.getAcceptedIssuers()[0]);
+    } finally {
+      tm.destroy();
+    }
+  }
+
+  @Test
+  public void testReloadCorruptTrustStore() throws Exception {
+    KeyPair kp = generateKeyPair("RSA");
+    cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA");
+    cert2 = generateCertificate("CN=Cert2", kp, 30, "SHA1withRSA");
+    String truststoreLocation = BASEDIR + "/testcorrupt.jks";
+    createTrustStore(truststoreLocation, "password", "cert1", cert1);
+
+    ReloadingX509TrustManager tm =
+      new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10);
+    try {
+      tm.init();
+      assertEquals(1, tm.getAcceptedIssuers().length);
+      X509Certificate cert = tm.getAcceptedIssuers()[0];
+
+      OutputStream os = new FileOutputStream(truststoreLocation);
+      os.write(1);
+      os.close();
+      new File(truststoreLocation).setLastModified(System.currentTimeMillis() -
+                                                   1000);
+
+      // Wait so that the file modification time is different
+      Thread.sleep((tm.getReloadInterval() + 200));
+
+      assertEquals(1, tm.getAcceptedIssuers().length);
+      assertEquals(cert, tm.getAcceptedIssuers()[0]);
+    } finally {
+      tm.destroy();
+    }
+  }
+
+}
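For orientation, here is a minimal sketch of how the trust manager under test might be plugged into a plain JSSE context. The constructor arguments mirror the tests above ("jks", location, password, reload interval in milliseconds); the truststore path and password are placeholders, and the class is assumed to implement javax.net.ssl.X509TrustManager, which the getAcceptedIssuers() calls in the tests suggest.

// Sketch only, not part of the patch.
package org.apache.hadoop.security.ssl;

import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;

public class ReloadingTrustManagerSketch {

  public static SSLContext buildContext() throws Exception {
    ReloadingX509TrustManager tm =
      new ReloadingX509TrustManager("jks", "/etc/hadoop/ssl/truststore.jks",
                                    "password", 10000);
    tm.init(); // loads the truststore and begins checking the file for changes
    SSLContext ctx = SSLContext.getInstance("TLS");
    ctx.init(null, new TrustManager[]{tm}, null);
    // callers should invoke tm.destroy() once the context is no longer needed
    return ctx;
  }
}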

+ 164 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java

@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.ssl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.net.URL;
+import java.security.GeneralSecurityException;
+
+public class TestSSLFactory {
+
+  private static final String BASEDIR =
+    System.getProperty("test.build.dir", "target/test-dir") + "/" +
+    TestSSLFactory.class.getSimpleName();
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    File base = new File(BASEDIR);
+    FileUtil.fullyDelete(base);
+    base.mkdirs();
+  }
+
+  private Configuration createConfiguration(boolean clientCert)
+    throws Exception {
+    Configuration conf = new Configuration();
+    String keystoresDir = new File(BASEDIR).getAbsolutePath();
+    String sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
+    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, clientCert);
+    return conf;
+  }
+
+  @After
+  @Before
+  public void cleanUp() throws Exception {
+    String keystoresDir = new File(BASEDIR).getAbsolutePath();
+    String sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
+    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfsDir);
+  }
+
+  @Test(expected = IllegalStateException.class)
+  public void clientMode() throws Exception {
+    Configuration conf = createConfiguration(false);
+    SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+    try {
+      sslFactory.init();
+      Assert.assertNotNull(sslFactory.createSSLSocketFactory());
+      Assert.assertNotNull(sslFactory.getHostnameVerifier());
+      sslFactory.createSSLServerSocketFactory();
+    } finally {
+      sslFactory.destroy();
+    }
+  }
+
+  private void serverMode(boolean clientCert, boolean socket) throws Exception {
+    Configuration conf = createConfiguration(clientCert);
+    SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
+    try {
+      sslFactory.init();
+      Assert.assertNotNull(sslFactory.createSSLServerSocketFactory());
+      Assert.assertEquals(clientCert, sslFactory.isClientCertRequired());
+      if (socket) {
+        sslFactory.createSSLSocketFactory();
+      } else {
+        sslFactory.getHostnameVerifier();
+      }
+    } finally {
+      sslFactory.destroy();
+    }
+  }
+
+
+  @Test(expected = IllegalStateException.class)
+  public void serverModeWithoutClientCertsSocket() throws Exception {
+    serverMode(false, true);
+  }
+
+  @Test(expected = IllegalStateException.class)
+  public void serverModeWithClientCertsSocket() throws Exception {
+    serverMode(true, true);
+  }
+
+  @Test(expected = IllegalStateException.class)
+  public void serverModeWithoutClientCertsVerifier() throws Exception {
+    serverMode(false, false);
+  }
+
+  @Test(expected = IllegalStateException.class)
+  public void serverModeWithClientCertsVerifier() throws Exception {
+    serverMode(true, false);
+  }
+
+  @Test
+  public void validHostnameVerifier() throws Exception {
+    Configuration conf = createConfiguration(false);
+    conf.unset(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY);
+    SSLFactory sslFactory = new
+      SSLFactory(SSLFactory.Mode.CLIENT, conf);
+    sslFactory.init();
+    Assert.assertEquals("DEFAULT", sslFactory.getHostnameVerifier().toString());
+    sslFactory.destroy();
+
+    conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "ALLOW_ALL");
+    sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+    sslFactory.init();
+    Assert.assertEquals("ALLOW_ALL",
+                        sslFactory.getHostnameVerifier().toString());
+    sslFactory.destroy();
+
+    conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "DEFAULT_AND_LOCALHOST");
+    sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+    sslFactory.init();
+    Assert.assertEquals("DEFAULT_AND_LOCALHOST",
+                        sslFactory.getHostnameVerifier().toString());
+    sslFactory.destroy();
+
+    conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "STRICT");
+    sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+    sslFactory.init();
+    Assert.assertEquals("STRICT", sslFactory.getHostnameVerifier().toString());
+    sslFactory.destroy();
+
+    conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "STRICT_IE6");
+    sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+    sslFactory.init();
+    Assert.assertEquals("STRICT_IE6",
+                        sslFactory.getHostnameVerifier().toString());
+    sslFactory.destroy();
+  }
+
+  @Test(expected = GeneralSecurityException.class)
+  public void invalidHostnameVerifier() throws Exception {
+    Configuration conf = createConfiguration(false);
+    conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "foo");
+    SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+    try {
+      sslFactory.init();
+    } finally {
+      sslFactory.destroy();
+    }
+  }
+
+}
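A server-side sketch complements the tests above, which show that a SERVER-mode factory may only hand out server socket factories (asking it for a client socket factory throws IllegalStateException). The cast assumes createSSLServerSocketFactory() returns the standard javax.net.ssl.SSLServerSocketFactory, and the port number is a placeholder.

// Sketch only, not part of the patch.
package org.apache.hadoop.security.ssl;

import javax.net.ssl.SSLServerSocket;
import org.apache.hadoop.conf.Configuration;

public class SslServerSocketSketch {

  public static SSLServerSocket open(Configuration conf) throws Exception {
    SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
    sslFactory.init();
    SSLServerSocket socket = (SSLServerSocket) sslFactory
      .createSSLServerSocketFactory().createServerSocket(50475);
    // require client certificates only when the factory's configuration asks for it
    socket.setNeedClientAuth(sslFactory.isClientCertRequired());
    return socket; // callers must also call sslFactory.destroy() on shutdown
  }
}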

+ 48 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java

@@ -40,6 +40,8 @@ import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -171,6 +173,52 @@ public class TestDelegationToken {
     }
   }
 
+  @Test
+  public void testGetUserNullOwner() {
+    TestDelegationTokenIdentifier ident =
+        new TestDelegationTokenIdentifier(null, null, null);
+    UserGroupInformation ugi = ident.getUser();
+    assertNull(ugi);
+  }
+  
+  @Test
+  public void testGetUserWithOwner() {
+    TestDelegationTokenIdentifier ident =
+        new TestDelegationTokenIdentifier(new Text("owner"), null, null);
+    UserGroupInformation ugi = ident.getUser();
+    assertNull(ugi.getRealUser());
+    assertEquals("owner", ugi.getUserName());
+    assertEquals(AuthenticationMethod.TOKEN, ugi.getAuthenticationMethod());
+  }
+
+  @Test
+  public void testGetUserWithOwnerEqualsReal() {
+    Text owner = new Text("owner");
+    TestDelegationTokenIdentifier ident =
+        new TestDelegationTokenIdentifier(owner, null, owner);
+    UserGroupInformation ugi = ident.getUser();
+    assertNull(ugi.getRealUser());
+    assertEquals("owner", ugi.getUserName());
+    assertEquals(AuthenticationMethod.TOKEN, ugi.getAuthenticationMethod());
+  }
+
+  @Test
+  public void testGetUserWithOwnerAndReal() {
+    Text owner = new Text("owner");
+    Text realUser = new Text("realUser");
+    TestDelegationTokenIdentifier ident =
+        new TestDelegationTokenIdentifier(owner, null, realUser);
+    UserGroupInformation ugi = ident.getUser();
+    assertNotNull(ugi.getRealUser());
+    assertNull(ugi.getRealUser().getRealUser());
+    assertEquals("owner", ugi.getUserName());
+    assertEquals("realUser", ugi.getRealUser().getUserName());
+    assertEquals(AuthenticationMethod.PROXY,
+                 ugi.getAuthenticationMethod());
+    assertEquals(AuthenticationMethod.TOKEN,
+                 ugi.getRealUser().getAuthenticationMethod());
+  }
+
   @Test
   public void testDelegationTokenSecretManager() throws Exception {
     final TestDelegationTokenSecretManager dtSecretManager = 

+ 25 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java

@@ -23,7 +23,9 @@ import static com.google.common.base.Preconditions.*;
 import org.hamcrest.Description;
 import org.junit.Assert;
 
+import static org.mockito.AdditionalMatchers.geq;
 import static org.mockito.Mockito.*;
+
 import org.mockito.stubbing.Answer;
 import org.mockito.internal.matchers.GreaterThan;
 import org.mockito.invocation.InvocationOnMock;
@@ -39,7 +41,11 @@ import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
+import org.apache.hadoop.metrics2.util.Quantile;
+
 import static org.apache.hadoop.metrics2.lib.Interns.*;
+import static org.apache.hadoop.test.MetricsAsserts.eqName;
 
 /**
  * Helpers for metrics source tests
@@ -328,4 +334,23 @@ public class MetricsAsserts {
                                    MetricsSource source) {
     assertGaugeGt(name, greater, getMetrics(source));
   }
+  
+  /**
+   * Asserts that the NumOps and quantile gauges for a metric have been
+   * emitted with non-negative values.
+   * 
+   * @param prefix of the metric
+   * @param rb MetricsRecordBuilder with the metric
+   */
+  public static void assertQuantileGauges(String prefix, 
+      MetricsRecordBuilder rb) {
+    verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0l));
+    for (Quantile q : MutableQuantiles.quantiles) {
+      String nameTemplate = prefix + "%dthPercentileLatency";
+      int percentile = (int) (100 * q.quantile);
+      verify(rb).addGauge(
+          eqName(info(String.format(nameTemplate, percentile), "")),
+          geq(0l));
+    }
+  }
 }
 }
+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/dev-support/findbugsExcludeFile.xml

@@ -25,4 +25,9 @@
     <Method name="destroy" />
     <Bug pattern="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD" />
   </Match>
+  <Match>
+    <Class name="org.apache.hadoop.lib.servlet.ServerWebApp" />
+    <Field name="authority" />
+    <Bug pattern="IS2_INCONSISTENT_SYNC" />
+  </Match>
 </FindBugsFilter>

+ 58 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -43,6 +43,8 @@
     <httpfs.tomcat.dist.dir>
       ${project.build.directory}/${project.artifactId}-${project.version}/share/hadoop/httpfs/tomcat
     </httpfs.tomcat.dist.dir>
+    <kerberos.realm>LOCALHOST</kerberos.realm>
+    <test.exclude.kerberos.test>**/TestHttpFSWithKerberos.java</test.exclude.kerberos.test>
   </properties>
 
   <dependencies>
@@ -267,6 +269,22 @@
         </excludes>
       </resource>
     </resources>
+    <testResources>
+      <testResource>
+        <directory>${basedir}/src/test/resources</directory>
+        <filtering>false</filtering>
+        <excludes>
+          <exclude>krb5.conf</exclude>
+        </excludes>
+      </testResource>
+      <testResource>
+        <directory>${basedir}/src/test/resources</directory>
+        <filtering>true</filtering>
+        <includes>
+          <include>krb5.conf</include>
+        </includes>
+      </testResource>
+    </testResources>
 
     <plugins>
       <plugin>
@@ -281,6 +299,16 @@
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
           <threadCount>1</threadCount>
+          <forkedProcessTimeoutInSeconds>600</forkedProcessTimeoutInSeconds>
+          <systemPropertyVariables>
+            <java.security.krb5.conf>${project.build.directory}/test-classes/krb5.conf</java.security.krb5.conf>
+            <kerberos.realm>${kerberos.realm}</kerberos.realm>
+          </systemPropertyVariables>
+          <excludes>
+            <exclude>**/${test.exclude}.java</exclude>
+            <exclude>${test.exclude.pattern}</exclude>
+            <exclude>${test.exclude.kerberos.test}</exclude>
+          </excludes>
         </configuration>
       </plugin>
       <plugin>
@@ -395,6 +423,36 @@
   </build>
 
   <profiles>
+    <profile>
+      <id>testKerberos</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <properties>
+        <test.exclude.kerberos.test>_</test.exclude.kerberos.test>
+      </properties>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-surefire-plugin</artifactId>
+            <configuration>
+              <forkMode>once</forkMode>
+              <forkedProcessTimeoutInSeconds>600</forkedProcessTimeoutInSeconds>
+              <systemPropertyVariables>
+                <java.security.krb5.conf>${project.build.directory}/test-classes/krb5.conf</java.security.krb5.conf>
+                <kerberos.realm>${kerberos.realm}</kerberos.realm>
+                <httpfs.http.hostname>localhost</httpfs.http.hostname>
+              </systemPropertyVariables>
+              <includes>
+                <include>**/TestHttpFSWithKerberos.java</include>
+              </includes>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    
     <profile>
       <id>docs</id>
       <activation>

+ 158 - 125
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.fs.http.client;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileChecksum;
@@ -28,16 +29,18 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PositionedReadable;
 import org.apache.hadoop.fs.PositionedReadable;
 import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.Authenticator;
 import org.apache.hadoop.security.authentication.client.Authenticator;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.json.simple.JSONArray;
 import org.json.simple.JSONArray;
 import org.json.simple.JSONObject;
 import org.json.simple.JSONObject;
-import org.json.simple.parser.JSONParser;
-import org.json.simple.parser.ParseException;
 
 
 import java.io.BufferedInputStream;
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.BufferedOutputStream;
@@ -47,30 +50,32 @@ import java.io.FileNotFoundException;
 import java.io.FilterInputStream;
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStream;
-import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.OutputStream;
-import java.lang.reflect.Constructor;
 import java.net.HttpURLConnection;
 import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URL;
-import java.net.URLEncoder;
+import java.security.PrivilegedExceptionAction;
 import java.text.MessageFormat;
 import java.text.MessageFormat;
 import java.util.HashMap;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Map;
+import java.util.concurrent.Callable;
 
 
 /**
 /**
  * HttpFSServer implementation of the FileSystemAccess FileSystem.
  * HttpFSServer implementation of the FileSystemAccess FileSystem.
  * <p/>
  * <p/>
  * This implementation allows a user to access HDFS over HTTP via a HttpFSServer server.
  * This implementation allows a user to access HDFS over HTTP via a HttpFSServer server.
  */
  */
-public class HttpFSFileSystem extends FileSystem {
+public class HttpFSFileSystem extends FileSystem
+  implements DelegationTokenRenewer.Renewable {
 
 
-  public static final String SERVICE_NAME = "/webhdfs";
+  public static final String SERVICE_NAME = HttpFSUtils.SERVICE_NAME;
 
 
-  public static final String SERVICE_VERSION = "/v1";
+  public static final String SERVICE_VERSION = HttpFSUtils.SERVICE_VERSION;
 
 
-  public static final String SERVICE_PREFIX = SERVICE_NAME + SERVICE_VERSION;
+  public static final String SCHEME = "webhdfs";
 
 
   public static final String OP_PARAM = "op";
   public static final String OP_PARAM = "op";
   public static final String DO_AS_PARAM = "doas";
   public static final String DO_AS_PARAM = "doas";
@@ -84,7 +89,6 @@ public class HttpFSFileSystem extends FileSystem {
   public static final String GROUP_PARAM = "group";
   public static final String GROUP_PARAM = "group";
   public static final String MODIFICATION_TIME_PARAM = "modificationtime";
   public static final String MODIFICATION_TIME_PARAM = "modificationtime";
   public static final String ACCESS_TIME_PARAM = "accesstime";
   public static final String ACCESS_TIME_PARAM = "accesstime";
-  public static final String RENEWER_PARAM = "renewer";
 
 
   public static final Short DEFAULT_PERMISSION = 0755;
   public static final Short DEFAULT_PERMISSION = 0755;
 
 
@@ -144,9 +148,6 @@ public class HttpFSFileSystem extends FileSystem {
   public static final String CONTENT_SUMMARY_SPACE_CONSUMED_JSON = "spaceConsumed";
   public static final String CONTENT_SUMMARY_SPACE_CONSUMED_JSON = "spaceConsumed";
   public static final String CONTENT_SUMMARY_SPACE_QUOTA_JSON = "spaceQuota";
   public static final String CONTENT_SUMMARY_SPACE_QUOTA_JSON = "spaceQuota";
 
 
-  public static final String DELEGATION_TOKEN_JSON = "Token";
-  public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
-
   public static final String ERROR_JSON = "RemoteException";
   public static final String ERROR_JSON = "RemoteException";
   public static final String ERROR_EXCEPTION_JSON = "exception";
   public static final String ERROR_EXCEPTION_JSON = "exception";
   public static final String ERROR_CLASSNAME_JSON = "javaClassName";
   public static final String ERROR_CLASSNAME_JSON = "javaClassName";
@@ -184,8 +185,31 @@ public class HttpFSFileSystem extends FileSystem {
 
 
   private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
   private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
   private URI uri;
   private URI uri;
+  private InetSocketAddress httpFSAddr;
   private Path workingDir;
   private Path workingDir;
+  private UserGroupInformation realUser;
   private String doAs;
   private String doAs;
+  private Token<?> delegationToken;
+
+  // This method enables handling UGI doAs with SPNEGO; we have to
+  // fall back to the real user who logged in with Kerberos credentials
+  private <T> T doAsRealUserIfNecessary(final Callable<T> callable)
+    throws IOException {
+    try {
+      if (realUser.getShortUserName().equals(doAs)) {
+        return callable.call();
+      } else {
+        return realUser.doAs(new PrivilegedExceptionAction<T>() {
+          @Override
+          public T run() throws Exception {
+            return callable.call();
+          }
+        });
+      }
+    } catch (Exception ex) {
+      throw new IOException(ex.toString(), ex);
+    }
+  }
 
 
   /**
   /**
    * Convenience method that creates a <code>HttpURLConnection</code> for the
    * Convenience method that creates a <code>HttpURLConnection</code> for the
@@ -204,25 +228,23 @@ public class HttpFSFileSystem extends FileSystem {
    *
    *
    * @throws IOException thrown if an IO error occurrs.
    * @throws IOException thrown if an IO error occurrs.
    */
    */
-  private HttpURLConnection getConnection(String method, Map<String, String> params,
-                                          Path path, boolean makeQualified) throws IOException {
-    params.put(DO_AS_PARAM, doAs);
+  private HttpURLConnection getConnection(final String method,
+      Map<String, String> params, Path path, boolean makeQualified)
+      throws IOException {
+    if (!realUser.getShortUserName().equals(doAs)) {
+      params.put(DO_AS_PARAM, doAs);
+    }
+    HttpFSKerberosAuthenticator.injectDelegationToken(params, delegationToken);
     if (makeQualified) {
     if (makeQualified) {
       path = makeQualified(path);
       path = makeQualified(path);
     }
     }
-    URI uri = path.toUri();
-    StringBuilder sb = new StringBuilder();
-    sb.append(uri.getScheme()).append("://").append(uri.getAuthority()).
-      append(SERVICE_PREFIX).append(uri.getPath());
-
-    String separator = "?";
-    for (Map.Entry<String, String> entry : params.entrySet()) {
-      sb.append(separator).append(entry.getKey()).append("=").
-        append(URLEncoder.encode(entry.getValue(), "UTF8"));
-      separator = "&";
-    }
-    URL url = new URL(sb.toString());
-    return getConnection(url, method);
+    final URL url = HttpFSUtils.createHttpURL(path, params);
+    return doAsRealUserIfNecessary(new Callable<HttpURLConnection>() {
+      @Override
+      public HttpURLConnection call() throws Exception {
+        return getConnection(url, method);
+      }
+    });
   }
   }
 
 
   /**
   /**
@@ -240,7 +262,8 @@ public class HttpFSFileSystem extends FileSystem {
    */
    */
   private HttpURLConnection getConnection(URL url, String method) throws IOException {
   private HttpURLConnection getConnection(URL url, String method) throws IOException {
     Class<? extends Authenticator> klass =
     Class<? extends Authenticator> klass =
-      getConf().getClass("httpfs.authenticator.class", HttpKerberosAuthenticator.class, Authenticator.class);
+      getConf().getClass("httpfs.authenticator.class",
+                         HttpFSKerberosAuthenticator.class, Authenticator.class);
     Authenticator authenticator = ReflectionUtils.newInstance(klass, getConf());
     Authenticator authenticator = ReflectionUtils.newInstance(klass, getConf());
     try {
     try {
       HttpURLConnection conn = new AuthenticatedURL(authenticator).openConnection(url, authToken);
       HttpURLConnection conn = new AuthenticatedURL(authenticator).openConnection(url, authToken);
@@ -254,63 +277,6 @@ public class HttpFSFileSystem extends FileSystem {
     }
     }
   }
   }
 
 
-  /**
-   * Convenience method that JSON Parses the <code>InputStream</code> of a <code>HttpURLConnection</code>.
-   *
-   * @param conn the <code>HttpURLConnection</code>.
-   *
-   * @return the parsed JSON object.
-   *
-   * @throws IOException thrown if the <code>InputStream</code> could not be JSON parsed.
-   */
-  private static Object jsonParse(HttpURLConnection conn) throws IOException {
-    try {
-      JSONParser parser = new JSONParser();
-      return parser.parse(new InputStreamReader(conn.getInputStream()));
-    } catch (ParseException ex) {
-      throw new IOException("JSON parser error, " + ex.getMessage(), ex);
-    }
-  }
-
-  /**
-   * Validates the status of an <code>HttpURLConnection</code> against an expected HTTP
-   * status code. If the current status code is not the expected one it throws an exception
-   * with a detail message using Server side error messages if available.
-   *
-   * @param conn the <code>HttpURLConnection</code>.
-   * @param expected the expected HTTP status code.
-   *
-   * @throws IOException thrown if the current status code does not match the expected one.
-   */
-  private static void validateResponse(HttpURLConnection conn, int expected) throws IOException {
-    int status = conn.getResponseCode();
-    if (status != expected) {
-      try {
-        JSONObject json = (JSONObject) jsonParse(conn);
-        json = (JSONObject) json.get(ERROR_JSON);
-        String message = (String) json.get(ERROR_MESSAGE_JSON);
-        String exception = (String) json.get(ERROR_EXCEPTION_JSON);
-        String className = (String) json.get(ERROR_CLASSNAME_JSON);
-
-        try {
-          ClassLoader cl = HttpFSFileSystem.class.getClassLoader();
-          Class klass = cl.loadClass(className);
-          Constructor constr = klass.getConstructor(String.class);
-          throw (IOException) constr.newInstance(message);
-        } catch (IOException ex) {
-          throw ex;
-        } catch (Exception ex) {
-          throw new IOException(MessageFormat.format("{0} - {1}", exception, message));
-        }
-      } catch (IOException ex) {
-        if (ex.getCause() instanceof IOException) {
-          throw (IOException) ex.getCause();
-        }
-        throw new IOException(MessageFormat.format("HTTP status [{0}], {1}", status, conn.getResponseMessage()));
-      }
-    }
-  }
-
   /**
   /**
    * Called after a new FileSystem instance is constructed.
    * Called after a new FileSystem instance is constructed.
    *
    *
@@ -320,15 +286,28 @@ public class HttpFSFileSystem extends FileSystem {
   @Override
   @Override
   public void initialize(URI name, Configuration conf) throws IOException {
   public void initialize(URI name, Configuration conf) throws IOException {
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    doAs = ugi.getUserName();
+
+    // the real user is the one that has the Kerberos credentials needed for
+    // SPNEGO to work
+    realUser = ugi.getRealUser();
+    if (realUser == null) {
+      realUser = UserGroupInformation.getLoginUser();
+    }
+    doAs = ugi.getShortUserName();
     super.initialize(name, conf);
     super.initialize(name, conf);
     try {
     try {
-      uri = new URI(name.getScheme() + "://" + name.getHost() + ":" + name.getPort());
+      uri = new URI(name.getScheme() + "://" + name.getAuthority());
+      httpFSAddr = NetUtils.createSocketAddr(getCanonicalUri().toString());
     } catch (URISyntaxException ex) {
     } catch (URISyntaxException ex) {
       throw new IOException(ex);
       throw new IOException(ex);
     }
     }
   }
   }
 
 
+  @Override
+  public String getScheme() {
+    return SCHEME;
+  }
+
   /**
   /**
    * Returns a URI whose scheme and authority identify this FileSystem.
    * Returns a URI whose scheme and authority identify this FileSystem.
    *
    *
@@ -339,6 +318,16 @@ public class HttpFSFileSystem extends FileSystem {
     return uri;
     return uri;
   }
   }
 
 
+  /**
+   * Get the default port for this file system.
+   * @return the default port or 0 if there isn't one
+   */
+  @Override
+  protected int getDefaultPort() {
+    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
+        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
+  }
+
   /**
   /**
    * HttpFSServer subclass of the <code>FSDataInputStream</code>.
    * HttpFSServer subclass of the <code>FSDataInputStream</code>.
    * <p/>
    * <p/>
@@ -397,7 +386,7 @@ public class HttpFSFileSystem extends FileSystem {
     params.put(OP_PARAM, Operation.OPEN.toString());
     params.put(OP_PARAM, Operation.OPEN.toString());
     HttpURLConnection conn = getConnection(Operation.OPEN.getMethod(), params,
     HttpURLConnection conn = getConnection(Operation.OPEN.getMethod(), params,
                                            f, true);
                                            f, true);
-    validateResponse(conn, HttpURLConnection.HTTP_OK);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
     return new FSDataInputStream(
     return new FSDataInputStream(
       new HttpFSDataInputStream(conn.getInputStream(), bufferSize));
       new HttpFSDataInputStream(conn.getInputStream(), bufferSize));
   }
   }
@@ -424,7 +413,7 @@ public class HttpFSFileSystem extends FileSystem {
       try {
       try {
         super.close();
         super.close();
       } finally {
       } finally {
-        validateResponse(conn, closeStatus);
+        HttpFSUtils.validateResponse(conn, closeStatus);
       }
       }
     }
     }
 
 
@@ -460,11 +449,11 @@ public class HttpFSFileSystem extends FileSystem {
             OutputStream os = new BufferedOutputStream(conn.getOutputStream(), bufferSize);
             OutputStream os = new BufferedOutputStream(conn.getOutputStream(), bufferSize);
             return new HttpFSDataOutputStream(conn, os, expectedStatus, statistics);
             return new HttpFSDataOutputStream(conn, os, expectedStatus, statistics);
           } catch (IOException ex) {
           } catch (IOException ex) {
-            validateResponse(conn, expectedStatus);
+            HttpFSUtils.validateResponse(conn, expectedStatus);
             throw ex;
             throw ex;
           }
           }
         } else {
         } else {
-          validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
+          HttpFSUtils.validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
           throw new IOException("Missing HTTP 'Location' header for [" + conn.getURL() + "]");
           throw new IOException("Missing HTTP 'Location' header for [" + conn.getURL() + "]");
         }
         }
       } else {
       } else {
@@ -476,7 +465,7 @@ public class HttpFSFileSystem extends FileSystem {
       if (exceptionAlreadyHandled) {
       if (exceptionAlreadyHandled) {
         throw ex;
         throw ex;
       } else {
       } else {
-        validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
+        HttpFSUtils.validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
         throw ex;
         throw ex;
       }
       }
     }
     }
@@ -548,8 +537,8 @@ public class HttpFSFileSystem extends FileSystem {
     params.put(DESTINATION_PARAM, dst.toString());
     params.put(DESTINATION_PARAM, dst.toString());
     HttpURLConnection conn = getConnection(Operation.RENAME.getMethod(),
     HttpURLConnection conn = getConnection(Operation.RENAME.getMethod(),
                                            params, src, true);
                                            params, src, true);
-    validateResponse(conn, HttpURLConnection.HTTP_OK);
-    JSONObject json = (JSONObject) jsonParse(conn);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
     return (Boolean) json.get(RENAME_JSON);
     return (Boolean) json.get(RENAME_JSON);
   }
   }
 
 
@@ -584,8 +573,8 @@ public class HttpFSFileSystem extends FileSystem {
     params.put(RECURSIVE_PARAM, Boolean.toString(recursive));
     params.put(RECURSIVE_PARAM, Boolean.toString(recursive));
     HttpURLConnection conn = getConnection(Operation.DELETE.getMethod(),
     HttpURLConnection conn = getConnection(Operation.DELETE.getMethod(),
                                            params, f, true);
                                            params, f, true);
-    validateResponse(conn, HttpURLConnection.HTTP_OK);
-    JSONObject json = (JSONObject) jsonParse(conn);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
     return (Boolean) json.get(DELETE_JSON);
     return (Boolean) json.get(DELETE_JSON);
   }
   }
 
 
@@ -605,8 +594,8 @@ public class HttpFSFileSystem extends FileSystem {
     params.put(OP_PARAM, Operation.LISTSTATUS.toString());
     params.put(OP_PARAM, Operation.LISTSTATUS.toString());
     HttpURLConnection conn = getConnection(Operation.LISTSTATUS.getMethod(),
     HttpURLConnection conn = getConnection(Operation.LISTSTATUS.getMethod(),
                                            params, f, true);
                                            params, f, true);
-    validateResponse(conn, HttpURLConnection.HTTP_OK);
-    JSONObject json = (JSONObject) jsonParse(conn);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
     json = (JSONObject) json.get(FILE_STATUSES_JSON);
     json = (JSONObject) json.get(FILE_STATUSES_JSON);
     JSONArray jsonArray = (JSONArray) json.get(FILE_STATUS_JSON);
     JSONArray jsonArray = (JSONArray) json.get(FILE_STATUS_JSON);
     FileStatus[] array = new FileStatus[jsonArray.size()];
     FileStatus[] array = new FileStatus[jsonArray.size()];
@@ -653,8 +642,8 @@ public class HttpFSFileSystem extends FileSystem {
     params.put(PERMISSION_PARAM, permissionToString(permission));
     params.put(PERMISSION_PARAM, permissionToString(permission));
     HttpURLConnection conn = getConnection(Operation.MKDIRS.getMethod(),
     HttpURLConnection conn = getConnection(Operation.MKDIRS.getMethod(),
                                            params, f, true);
                                            params, f, true);
-    validateResponse(conn, HttpURLConnection.HTTP_OK);
-    JSONObject json = (JSONObject) jsonParse(conn);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
     return (Boolean) json.get(MKDIRS_JSON);
     return (Boolean) json.get(MKDIRS_JSON);
   }
   }
 
 
@@ -674,8 +663,8 @@ public class HttpFSFileSystem extends FileSystem {
     params.put(OP_PARAM, Operation.GETFILESTATUS.toString());
     params.put(OP_PARAM, Operation.GETFILESTATUS.toString());
     HttpURLConnection conn = getConnection(Operation.GETFILESTATUS.getMethod(),
     HttpURLConnection conn = getConnection(Operation.GETFILESTATUS.getMethod(),
                                            params, f, true);
                                            params, f, true);
-    validateResponse(conn, HttpURLConnection.HTTP_OK);
-    JSONObject json = (JSONObject) jsonParse(conn);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
     json = (JSONObject) json.get(FILE_STATUS_JSON);
     json = (JSONObject) json.get(FILE_STATUS_JSON);
     f = makeQualified(f);
     f = makeQualified(f);
     return createFileStatus(f, json);
     return createFileStatus(f, json);
@@ -693,8 +682,8 @@ public class HttpFSFileSystem extends FileSystem {
       HttpURLConnection conn =
       HttpURLConnection conn =
         getConnection(Operation.GETHOMEDIRECTORY.getMethod(), params,
         getConnection(Operation.GETHOMEDIRECTORY.getMethod(), params,
                       new Path(getUri().toString(), "/"), false);
                       new Path(getUri().toString(), "/"), false);
-      validateResponse(conn, HttpURLConnection.HTTP_OK);
-      JSONObject json = (JSONObject) jsonParse(conn);
+      HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+      JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
       return new Path((String) json.get(HOME_DIR_JSON));
       return new Path((String) json.get(HOME_DIR_JSON));
     } catch (IOException ex) {
     } catch (IOException ex) {
       throw new RuntimeException(ex);
       throw new RuntimeException(ex);
@@ -718,7 +707,7 @@ public class HttpFSFileSystem extends FileSystem {
     params.put(GROUP_PARAM, groupname);
     params.put(GROUP_PARAM, groupname);
     HttpURLConnection conn = getConnection(Operation.SETOWNER.getMethod(),
     HttpURLConnection conn = getConnection(Operation.SETOWNER.getMethod(),
                                            params, p, true);
                                            params, p, true);
-    validateResponse(conn, HttpURLConnection.HTTP_OK);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
   }
   }
 
 
   /**
   /**
@@ -733,7 +722,7 @@ public class HttpFSFileSystem extends FileSystem {
     params.put(OP_PARAM, Operation.SETPERMISSION.toString());
     params.put(OP_PARAM, Operation.SETPERMISSION.toString());
     params.put(PERMISSION_PARAM, permissionToString(permission));
     params.put(PERMISSION_PARAM, permissionToString(permission));
     HttpURLConnection conn = getConnection(Operation.SETPERMISSION.getMethod(), params, p, true);
     HttpURLConnection conn = getConnection(Operation.SETPERMISSION.getMethod(), params, p, true);
-    validateResponse(conn, HttpURLConnection.HTTP_OK);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
   }
   }
 
 
   /**
   /**
@@ -755,7 +744,7 @@ public class HttpFSFileSystem extends FileSystem {
     params.put(ACCESS_TIME_PARAM, Long.toString(atime));
     params.put(ACCESS_TIME_PARAM, Long.toString(atime));
     HttpURLConnection conn = getConnection(Operation.SETTIMES.getMethod(),
     HttpURLConnection conn = getConnection(Operation.SETTIMES.getMethod(),
                                            params, p, true);
                                            params, p, true);
-    validateResponse(conn, HttpURLConnection.HTTP_OK);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
   }
   }
 
 
   /**
   /**
@@ -777,19 +766,11 @@ public class HttpFSFileSystem extends FileSystem {
     params.put(REPLICATION_PARAM, Short.toString(replication));
     params.put(REPLICATION_PARAM, Short.toString(replication));
     HttpURLConnection conn =
     HttpURLConnection conn =
       getConnection(Operation.SETREPLICATION.getMethod(), params, src, true);
       getConnection(Operation.SETREPLICATION.getMethod(), params, src, true);
-    validateResponse(conn, HttpURLConnection.HTTP_OK);
-    JSONObject json = (JSONObject) jsonParse(conn);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
     return (Boolean) json.get(SET_REPLICATION_JSON);
     return (Boolean) json.get(SET_REPLICATION_JSON);
   }
   }
 
 
-  /**
-   * Creates a <code>FileStatus</code> object using a JSON file-status payload
-   * received from a HttpFSServer server.
-   *
-   * @param json a JSON file-status payload received from a HttpFSServer server
-   *
-   * @return the corresponding <code>FileStatus</code>
-   */
   private FileStatus createFileStatus(Path parent, JSONObject json) {
   private FileStatus createFileStatus(Path parent, JSONObject json) {
     String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
     String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
     Path path = (pathSuffix.equals("")) ? parent : new Path(parent, pathSuffix);
     Path path = (pathSuffix.equals("")) ? parent : new Path(parent, pathSuffix);
@@ -828,9 +809,9 @@ public class HttpFSFileSystem extends FileSystem {
     params.put(OP_PARAM, Operation.GETCONTENTSUMMARY.toString());
     params.put(OP_PARAM, Operation.GETCONTENTSUMMARY.toString());
     HttpURLConnection conn =
     HttpURLConnection conn =
       getConnection(Operation.GETCONTENTSUMMARY.getMethod(), params, f, true);
       getConnection(Operation.GETCONTENTSUMMARY.getMethod(), params, f, true);
-    validateResponse(conn, HttpURLConnection.HTTP_OK);
-    JSONObject json =
-      (JSONObject) ((JSONObject) jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) ((JSONObject)
+      HttpFSUtils.jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
     return new ContentSummary((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON),
     return new ContentSummary((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON),
                               (Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON),
                               (Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON),
                               (Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON),
                               (Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON),
@@ -846,9 +827,9 @@ public class HttpFSFileSystem extends FileSystem {
     params.put(OP_PARAM, Operation.GETFILECHECKSUM.toString());
     params.put(OP_PARAM, Operation.GETFILECHECKSUM.toString());
     HttpURLConnection conn =
     HttpURLConnection conn =
       getConnection(Operation.GETFILECHECKSUM.getMethod(), params, f, true);
       getConnection(Operation.GETFILECHECKSUM.getMethod(), params, f, true);
-    validateResponse(conn, HttpURLConnection.HTTP_OK);
-    final JSONObject json =
-      (JSONObject) ((JSONObject) jsonParse(conn)).get(FILE_CHECKSUM_JSON);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    final JSONObject json = (JSONObject) ((JSONObject)
+      HttpFSUtils.jsonParse(conn)).get(FILE_CHECKSUM_JSON);
     return new FileChecksum() {
     return new FileChecksum() {
       @Override
       @Override
       public String getAlgorithmName() {
       public String getAlgorithmName() {
@@ -877,4 +858,56 @@ public class HttpFSFileSystem extends FileSystem {
     };
     };
   }
   }
 
 
+
+  @Override
+  @SuppressWarnings("deprecation")
+  public Token<?> getDelegationToken(final String renewer)
+    throws IOException {
+    return doAsRealUserIfNecessary(new Callable<Token<?>>() {
+      @Override
+      public Token<?> call() throws Exception {
+        return HttpFSKerberosAuthenticator.
+          getDelegationToken(uri, httpFSAddr, authToken, renewer);
+      }
+    });
+  }
+
+
+  @Override
+  public List<Token<?>> getDelegationTokens(final String renewer)
+    throws IOException {
+    return doAsRealUserIfNecessary(new Callable<List<Token<?>>>() {
+      @Override
+      public List<Token<?>> call() throws Exception {
+        return HttpFSKerberosAuthenticator.
+          getDelegationTokens(uri, httpFSAddr, authToken, renewer);
+      }
+    });
+  }
+
+  public long renewDelegationToken(final Token<?> token) throws IOException {
+    return doAsRealUserIfNecessary(new Callable<Long>() {
+      @Override
+      public Long call() throws Exception {
+        return HttpFSKerberosAuthenticator.
+          renewDelegationToken(uri,  authToken, token);
+      }
+    });
+  }
+
+  public void cancelDelegationToken(final Token<?> token) throws IOException {
+    HttpFSKerberosAuthenticator.
+      cancelDelegationToken(uri, authToken, token);
+  }
+
+  @Override
+  public Token<?> getRenewToken() {
+    return delegationToken;
+  }
+
+  @Override
+  public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
+    delegationToken = token;
+  }
+
 }
 }
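To show how the new delegation-token methods fit together, an end-to-end sketch follows; the HttpFS endpoint, renewer name and path are placeholders, and the filesystem is constructed directly only to keep the example self-contained.

// Sketch only, not part of the patch.
package org.apache.hadoop.fs.http.client;

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.token.Token;

public class HttpFSDelegationTokenSketch {

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HttpFSFileSystem fs = new HttpFSFileSystem();
    fs.initialize(URI.create("webhdfs://httpfs-host:14000"), conf);
    try {
      // obtained over SPNEGO (or via the real user when running under doAs)
      Token<?> token = fs.getDelegationToken("yarn");
      long nextExpiration = fs.renewDelegationToken(token);
      System.out.println("token renewed until " + nextExpiration);
      System.out.println(fs.listStatus(new Path("/")).length + " entries under /");
      fs.cancelDelegationToken(token);
    } finally {
      fs.close();
    }
  }
}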

+ 226 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator.java

@@ -0,0 +1,226 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.client;
+
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.Authenticator;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A <code>KerberosAuthenticator</code> subclass that falls back to
+ * {@link HttpFSPseudoAuthenticator}.
+ */
+public class HttpFSKerberosAuthenticator extends KerberosAuthenticator {
+
+  /**
+   * Returns the fallback authenticator if the server does not use
+   * Kerberos SPNEGO HTTP authentication.
+   *
+   * @return a {@link HttpFSPseudoAuthenticator} instance.
+   */
+  @Override
+  protected Authenticator getFallBackAuthenticator() {
+    return new HttpFSPseudoAuthenticator();
+  }
+
+  private static final String HTTP_GET = "GET";
+  private static final String HTTP_PUT = "PUT";
+
+  public static final String DELEGATION_PARAM = "delegation";
+  public static final String TOKEN_PARAM = "token";
+  public static final String RENEWER_PARAM = "renewer";
+  public static final String TOKEN_KIND = "HTTPFS_DELEGATION_TOKEN";
+  public static final String DELEGATION_TOKEN_JSON = "Token";
+  public static final String DELEGATION_TOKENS_JSON = "Tokens";
+  public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
+  public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
+
+  /**
+   * DelegationToken operations.
+   */
+  public static enum DelegationTokenOperation {
+    GETDELEGATIONTOKEN(HTTP_GET, true),
+    GETDELEGATIONTOKENS(HTTP_GET, true),
+    RENEWDELEGATIONTOKEN(HTTP_PUT, true),
+    CANCELDELEGATIONTOKEN(HTTP_PUT, false);
+
+    private String httpMethod;
+    private boolean requiresKerberosCredentials;
+
+    private DelegationTokenOperation(String httpMethod,
+                                     boolean requiresKerberosCredentials) {
+      this.httpMethod = httpMethod;
+      this.requiresKerberosCredentials = requiresKerberosCredentials;
+    }
+
+    public String getHttpMethod() {
+      return httpMethod;
+    }
+
+    public boolean requiresKerberosCredentials() {
+      return requiresKerberosCredentials;
+    }
+
+  }
+
+  public static void injectDelegationToken(Map<String, String> params,
+                                          Token<?> dtToken)
+    throws IOException {
+    if (dtToken != null) {
+      params.put(DELEGATION_PARAM, dtToken.encodeToUrlString());
+    }
+  }
+
+  private boolean hasDelegationToken(URL url) {
+    return url.getQuery().contains(DELEGATION_PARAM + "=");
+  }
+
+  @Override
+  public void authenticate(URL url, AuthenticatedURL.Token token)
+    throws IOException, AuthenticationException {
+    if (!hasDelegationToken(url)) {
+      super.authenticate(url, token);
+    }
+  }
+
+  public static final String OP_PARAM = "op";
+
+  private static List<Token<?>> getDelegationTokens(URI fsURI,
+    InetSocketAddress httpFSAddr, DelegationTokenOperation op,
+    AuthenticatedURL.Token token, String renewer)
+    throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, op.toString());
+    params.put(RENEWER_PARAM, renewer);
+    URL url = HttpFSUtils.createHttpURL(new Path(fsURI), params);
+    AuthenticatedURL aUrl =
+      new AuthenticatedURL(new HttpFSKerberosAuthenticator());
+    try {
+      HttpURLConnection conn = aUrl.openConnection(url, token);
+      conn.setRequestMethod(op.getHttpMethod());
+      HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+      List<String> list = new ArrayList<String>();
+      if (op == DelegationTokenOperation.GETDELEGATIONTOKEN) {
+        JSONObject json = (JSONObject) ((JSONObject)
+          HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
+        String tokenStr = (String)
+          json.get(DELEGATION_TOKEN_URL_STRING_JSON);
+        list.add(tokenStr);
+      }
+      else if (op == DelegationTokenOperation.GETDELEGATIONTOKENS) {
+        JSONObject json = (JSONObject) ((JSONObject)
+          HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKENS_JSON);
+        JSONArray array = (JSONArray) json.get(DELEGATION_TOKEN_JSON);
+        for (Object element : array) {
+          String tokenStr = (String)
+            ((Map) element).get(DELEGATION_TOKEN_URL_STRING_JSON);
+          list.add(tokenStr);
+        }
+
+      } else {
+        throw new IllegalArgumentException("Invalid operation: " +
+                                           op.toString());
+      }
+      List<Token<?>> dTokens = new ArrayList<Token<?>>();
+      for (String tokenStr : list) {
+        Token<AbstractDelegationTokenIdentifier> dToken =
+          new Token<AbstractDelegationTokenIdentifier>();
+        dToken.decodeFromUrlString(tokenStr);
+        dTokens.add(dToken);
+        SecurityUtil.setTokenService(dToken, httpFSAddr);
+      }
+      return dTokens;
+    } catch (AuthenticationException ex) {
+      throw new IOException(ex.toString(), ex);
+    }
+  }
+
+  public static List<Token<?>> getDelegationTokens(URI fsURI,
+    InetSocketAddress httpFSAddr, AuthenticatedURL.Token token,
+    String renewer) throws IOException {
+    return getDelegationTokens(fsURI, httpFSAddr,
+      DelegationTokenOperation.GETDELEGATIONTOKENS, token, renewer);
+  }
+
+  public static Token<?> getDelegationToken(URI fsURI,
+    InetSocketAddress httpFSAddr, AuthenticatedURL.Token token,
+    String renewer) throws IOException {
+    return getDelegationTokens(fsURI, httpFSAddr,
+      DelegationTokenOperation.GETDELEGATIONTOKENS, token, renewer).get(0);
+  }
+
+  public static long renewDelegationToken(URI fsURI,
+    AuthenticatedURL.Token token, Token<?> dToken) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM,
+               DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
+    params.put(TOKEN_PARAM, dToken.encodeToUrlString());
+    URL url = HttpFSUtils.createHttpURL(new Path(fsURI), params);
+    AuthenticatedURL aUrl =
+      new AuthenticatedURL(new HttpFSKerberosAuthenticator());
+    try {
+      HttpURLConnection conn = aUrl.openConnection(url, token);
+      conn.setRequestMethod(
+        DelegationTokenOperation.RENEWDELEGATIONTOKEN.getHttpMethod());
+      HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+      JSONObject json = (JSONObject) ((JSONObject)
+        HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
+      return (Long)(json.get(RENEW_DELEGATION_TOKEN_JSON));
+    } catch (AuthenticationException ex) {
+      throw new IOException(ex.toString(), ex);
+    }
+  }
+
+  public static void cancelDelegationToken(URI fsURI,
+    AuthenticatedURL.Token token, Token<?> dToken) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM,
+               DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
+    params.put(TOKEN_PARAM, dToken.encodeToUrlString());
+    URL url = HttpFSUtils.createHttpURL(new Path(fsURI), params);
+    AuthenticatedURL aUrl =
+      new AuthenticatedURL(new HttpFSKerberosAuthenticator());
+    try {
+      HttpURLConnection conn = aUrl.openConnection(url, token);
+      conn.setRequestMethod(
+        DelegationTokenOperation.CANCELDELEGATIONTOKEN.getHttpMethod());
+      HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    } catch (AuthenticationException ex) {
+      throw new IOException(ex.toString(), ex);
+    }
+  }
+
+}
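
If the static helpers above are used directly (outside of HttpFSFileSystem), a rough sketch could look like the following; the host name, port and renewer are placeholders, and a Kerberos login (or the pseudo fallback) is assumed to be in place for the SPNEGO handshake.

    import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
    import org.apache.hadoop.security.token.Token;

    import java.net.InetSocketAddress;
    import java.net.URI;

    public class DelegationTokenHelpersSketch {
      public static void main(String[] args) throws Exception {
        URI fsUri = new URI("webhdfs://httpfs.example.com:14000");
        InetSocketAddress httpFSAddr =
            new InetSocketAddress("httpfs.example.com", 14000);
        AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();

        // GETDELEGATIONTOKEN, then RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN.
        Token<?> dt = HttpFSKerberosAuthenticator.getDelegationToken(
            fsUri, httpFSAddr, authToken, "oozie");
        long expires = HttpFSKerberosAuthenticator.renewDelegationToken(
            fsUri, authToken, dt);
        System.out.println("token renewed until " + expires);
        HttpFSKerberosAuthenticator.cancelDelegationToken(fsUri, authToken, dt);
      }
    }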

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpPseudoAuthenticator.java → hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSPseudoAuthenticator.java

@@ -27,7 +27,7 @@ import java.io.IOException;
  * A <code>PseudoAuthenticator</code> subclass that uses FileSystemAccess's
  * <code>UserGroupInformation</code> to obtain the client user name (the UGI's login user).
  */
-public class HttpPseudoAuthenticator extends PseudoAuthenticator {
+public class HttpFSPseudoAuthenticator extends PseudoAuthenticator {
 
 
   /**
    * Return the client user name.

+ 148 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSUtils.java

@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.client;
+
+import org.apache.hadoop.fs.Path;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
+
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.lang.reflect.Constructor;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.net.URL;
+import java.net.URLEncoder;
+import java.text.MessageFormat;
+import java.util.Map;
+
+/**
+ * Utility methods used by HttpFS classes.
+ */
+public class HttpFSUtils {
+
+  public static final String SERVICE_NAME = "/webhdfs";
+
+  public static final String SERVICE_VERSION = "/v1";
+
+  private static final String SERVICE_PATH = SERVICE_NAME + SERVICE_VERSION;
+
+  /**
+   * Convenience method that creates an HTTP <code>URL</code> for the
+   * HttpFSServer file system operations.
+   * <p/>
+   *
+   * @param path the file path.
+   * @param params the query string parameters.
+   *
+   * @return a <code>URL</code> for the HttpFSServer server,
+   *
+   * @throws IOException thrown if an IO error occurs.
+   */
+  static URL createHttpURL(Path path, Map<String, String> params)
+    throws IOException {
+    URI uri = path.toUri();
+    String realScheme;
+    if (uri.getScheme().equalsIgnoreCase(HttpFSFileSystem.SCHEME)) {
+      realScheme = "http";
+    } else {
+      throw new IllegalArgumentException(MessageFormat.format(
+        "Invalid scheme [{0}], it should be 'webhdfs'", uri));
+    }
+    StringBuilder sb = new StringBuilder();
+    sb.append(realScheme).append("://").append(uri.getAuthority()).
+      append(SERVICE_PATH).append(uri.getPath());
+
+    String separator = "?";
+    for (Map.Entry<String, String> entry : params.entrySet()) {
+      sb.append(separator).append(entry.getKey()).append("=").
+        append(URLEncoder.encode(entry.getValue(), "UTF8"));
+      separator = "&";
+    }
+    return new URL(sb.toString());
+  }
+
+  /**
+   * Validates the status of an <code>HttpURLConnection</code> against an
+   * expected HTTP status code. If the current status code is not the expected
+   * one, it throws an exception with a detail message using server-side error
+   * messages if available.
+   *
+   * @param conn the <code>HttpURLConnection</code>.
+   * @param expected the expected HTTP status code.
+   *
+   * @throws IOException thrown if the current status code does not match the
+   * expected one.
+   */
+  @SuppressWarnings({"unchecked", "deprecation"})
+  static void validateResponse(HttpURLConnection conn, int expected)
+    throws IOException {
+    int status = conn.getResponseCode();
+    if (status != expected) {
+      try {
+        JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+        json = (JSONObject) json.get(HttpFSFileSystem.ERROR_JSON);
+        String message = (String) json.get(HttpFSFileSystem.ERROR_MESSAGE_JSON);
+        String exception = (String)
+          json.get(HttpFSFileSystem.ERROR_EXCEPTION_JSON);
+        String className = (String)
+          json.get(HttpFSFileSystem.ERROR_CLASSNAME_JSON);
+
+        try {
+          ClassLoader cl = HttpFSFileSystem.class.getClassLoader();
+          Class klass = cl.loadClass(className);
+          Constructor constr = klass.getConstructor(String.class);
+          throw (IOException) constr.newInstance(message);
+        } catch (IOException ex) {
+          throw ex;
+        } catch (Exception ex) {
+          throw new IOException(MessageFormat.format("{0} - {1}", exception,
+                                                     message));
+        }
+      } catch (IOException ex) {
+        if (ex.getCause() instanceof IOException) {
+          throw (IOException) ex.getCause();
+        }
+        throw new IOException(
+          MessageFormat.format("HTTP status [{0}], {1}",
+                               status, conn.getResponseMessage()));
+      }
+    }
+  }
+
+  /**
+   * Convenience method that JSON Parses the <code>InputStream</code> of a
+   * <code>HttpURLConnection</code>.
+   *
+   * @param conn the <code>HttpURLConnection</code>.
+   *
+   * @return the parsed JSON object.
+   *
+   * @throws IOException thrown if the <code>InputStream</code> could not be
+   * JSON parsed.
+   */
+  static Object jsonParse(HttpURLConnection conn) throws IOException {
+    try {
+      JSONParser parser = new JSONParser();
+      return parser.parse(new InputStreamReader(conn.getInputStream()));
+    } catch (ParseException ex) {
+      throw new IOException("JSON parser error, " + ex.getMessage(), ex);
+    }
+  }
+}
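
For reference, a small sketch of the URL shape createHttpURL builds; the method is package-private, so the sketch assumes it lives in org.apache.hadoop.fs.http.client, and the host, path and parameter values are invented.

    package org.apache.hadoop.fs.http.client;

    import org.apache.hadoop.fs.Path;

    import java.net.URL;
    import java.util.LinkedHashMap;
    import java.util.Map;

    class HttpFSUtilsUrlSketch {
      public static void main(String[] args) throws Exception {
        Map<String, String> params = new LinkedHashMap<String, String>();
        params.put("op", "GETFILESTATUS");
        params.put("user.name", "alice");
        // webhdfs:// is mapped to http:// and the /webhdfs/v1 prefix is prepended, giving
        // http://httpfs.example.com:14000/webhdfs/v1/tmp/data?op=GETFILESTATUS&user.name=alice
        URL url = HttpFSUtils.createHttpURL(
            new Path("webhdfs://httpfs.example.com:14000/tmp/data"), params);
        System.out.println(url);
      }
    }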

+ 6 - 3
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/AuthFilter.java → hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.fs.http.server;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-
 import javax.servlet.FilterConfig;
 import java.io.FileReader;
 import java.io.IOException;
@@ -31,7 +30,7 @@ import java.util.Properties;
  * Subclass of hadoop-auth <code>AuthenticationFilter</code> that obtains its configuration
  * from HttpFSServer's server configuration.
  */
-public class AuthFilter extends AuthenticationFilter {
+public class HttpFSAuthenticationFilter extends AuthenticationFilter {
   private static final String CONF_PREFIX = "httpfs.authentication.";

   private static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + ".file";
@@ -63,6 +62,11 @@ public class AuthFilter extends AuthenticationFilter {
       }
     }

+    if (props.getProperty(AUTH_TYPE).equals("kerberos")) {
+      props.setProperty(AUTH_TYPE,
+                        HttpFSKerberosAuthenticationHandler.class.getName());
+    }
+
     String signatureSecretFile = props.getProperty(SIGNATURE_SECRET_FILE, null);
     if (signatureSecretFile == null) {
       throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);
@@ -84,5 +88,4 @@ public class AuthFilter extends AuthenticationFilter {
     return props;
   }

-
 }

+ 255 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandler.java

@@ -0,0 +1,255 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
+import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator.DelegationTokenOperation;
+import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
+import org.apache.hadoop.lib.service.DelegationTokenManager;
+import org.apache.hadoop.lib.service.DelegationTokenManagerException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.token.Token;
+import org.json.simple.JSONObject;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.MediaType;
+import java.io.IOException;
+import java.io.Writer;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Server side <code>AuthenticationHandler</code> that authenticates requests
+ * using the incoming delegation token as a 'delegation' query string parameter.
+ * <p/>
+ * If no delegation token is present in the request, it delegates to the
+ * {@link KerberosAuthenticationHandler}.
+ */
+public class HttpFSKerberosAuthenticationHandler
+  extends KerberosAuthenticationHandler {
+
+  static final Set<String> DELEGATION_TOKEN_OPS =
+    new HashSet<String>();
+
+  static {
+    DELEGATION_TOKEN_OPS.add(
+      DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
+    DELEGATION_TOKEN_OPS.add(
+      DelegationTokenOperation.GETDELEGATIONTOKENS.toString());
+    DELEGATION_TOKEN_OPS.add(
+      DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
+    DELEGATION_TOKEN_OPS.add(
+      DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
+  }
+
+  public static final String TYPE = "kerberos-dt";
+
+  /**
+   * Returns the authentication type of the handler.
+   *
+   * @return <code>kerberos-dt</code>
+   */
+  @Override
+  public String getType() {
+    return TYPE;
+  }
+
+  private static final String ENTER = System.getProperty("line.separator");
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public boolean managementOperation(AuthenticationToken token,
+      HttpServletRequest request, HttpServletResponse response)
+    throws IOException, AuthenticationException {
+    boolean requestContinues = true;
+    String op = request.getParameter(HttpFSFileSystem.OP_PARAM);
+    op = (op != null) ? op.toUpperCase() : null;
+    if (DELEGATION_TOKEN_OPS.contains(op) &&
+        !request.getMethod().equals("OPTIONS")) {
+      DelegationTokenOperation dtOp =
+        DelegationTokenOperation.valueOf(op);
+      if (dtOp.getHttpMethod().equals(request.getMethod())) {
+        if (dtOp.requiresKerberosCredentials() && token == null) {
+          response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
+            MessageFormat.format(
+              "Operation [{0}] requires SPNEGO authentication established",
+              dtOp));
+          requestContinues = false;
+        } else {
+          DelegationTokenManager tokenManager =
+            HttpFSServerWebApp.get().get(DelegationTokenManager.class);
+          try {
+            Map map = null;
+            switch (dtOp) {
+              case GETDELEGATIONTOKEN:
+              case GETDELEGATIONTOKENS:
+                String renewerParam =
+                  request.getParameter(HttpFSKerberosAuthenticator.RENEWER_PARAM);
+                if (renewerParam == null) {
+                  renewerParam = token.getUserName();
+                }
+                Token<?> dToken = tokenManager.createToken(
+                  UserGroupInformation.getCurrentUser(), renewerParam);
+                if (dtOp == DelegationTokenOperation.GETDELEGATIONTOKEN) {
+                  map = delegationTokenToJSON(dToken);
+                } else {
+                  map = delegationTokensToJSON(Arrays.asList((Token)dToken));
+                }
+                break;
+              case RENEWDELEGATIONTOKEN:
+              case CANCELDELEGATIONTOKEN:
+                String tokenParam =
+                  request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM);
+                if (tokenParam == null) {
+                  response.sendError(HttpServletResponse.SC_BAD_REQUEST,
+                    MessageFormat.format(
+                      "Operation [{0}] requires the parameter [{1}]",
+                      dtOp, HttpFSKerberosAuthenticator.TOKEN_PARAM));
+                  requestContinues = false;
+                } else {
+                  if (dtOp == DelegationTokenOperation.CANCELDELEGATIONTOKEN) {
+                    Token<DelegationTokenIdentifier> dt =
+                      new Token<DelegationTokenIdentifier>();
+                    dt.decodeFromUrlString(tokenParam);
+                    tokenManager.cancelToken(dt,
+                      UserGroupInformation.getCurrentUser().getUserName());
+                  } else {
+                    Token<DelegationTokenIdentifier> dt =
+                      new Token<DelegationTokenIdentifier>();
+                    dt.decodeFromUrlString(tokenParam);
+                    long expirationTime =
+                      tokenManager.renewToken(dt, token.getUserName());
+                    map = new HashMap();
+                    map.put("long", expirationTime);
+                  }
+                }
+                break;
+            }
+            if (requestContinues) {
+              response.setStatus(HttpServletResponse.SC_OK);
+              if (map != null) {
+                response.setContentType(MediaType.APPLICATION_JSON);
+                Writer writer = response.getWriter();
+                JSONObject.writeJSONString(map, writer);
+                writer.write(ENTER);
+                writer.flush();
+
+              }
+              requestContinues = false;
+            }
+          } catch (DelegationTokenManagerException ex) {
+            throw new AuthenticationException(ex.toString(), ex);
+          }
+        }
+      } else {
+        response.sendError(HttpServletResponse.SC_BAD_REQUEST,
+          MessageFormat.format(
+            "Wrong HTTP method [{0}] for operation [{1}], it should be [{2}]",
+            request.getMethod(), dtOp, dtOp.getHttpMethod()));
+        requestContinues = false;
+      }
+    }
+    return requestContinues;
+  }
+
+  @SuppressWarnings("unchecked")
+  private static Map delegationTokenToJSON(Token token) throws IOException {
+    Map json = new LinkedHashMap();
+    json.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
+             token.encodeToUrlString());
+    Map response = new LinkedHashMap();
+    response.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON, json);
+    return response;
+  }
+  
+  @SuppressWarnings("unchecked")
+  private static Map delegationTokensToJSON(List<Token> tokens)
+    throws IOException {
+    List list = new ArrayList();
+    for (Token token : tokens) {
+      Map map = new HashMap();
+      map.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
+              token.encodeToUrlString());
+      list.add(map);
+    }
+    Map map = new HashMap();
+    map.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON, list);
+    Map response = new LinkedHashMap();
+    response.put(HttpFSKerberosAuthenticator.DELEGATION_TOKENS_JSON, map);
+    return response;
+  }
+
+  /**
+   * Authenticates a request looking for the <code>delegation</code>
+   * query-string parameter and verifying it is a valid token. If there is no
+   * <code>delegation</code> query-string parameter, it delegates the
+   * authentication to the {@link KerberosAuthenticationHandler} unless it is
+   * disabled.
+   *
+   * @param request the HTTP client request.
+   * @param response the HTTP client response.
+   *
+   * @return the authentication token for the authenticated request.
+   * @throws IOException thrown if an IO error occurred.
+   * @throws AuthenticationException thrown if the authentication failed.
+   */
+  @Override
+  public AuthenticationToken authenticate(HttpServletRequest request,
+                                          HttpServletResponse response)
+    throws IOException, AuthenticationException {
+    AuthenticationToken token;
+    String delegationParam =
+      request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM);
+    if (delegationParam != null) {
+      try {
+        Token<DelegationTokenIdentifier> dt =
+          new Token<DelegationTokenIdentifier>();
+        dt.decodeFromUrlString(delegationParam);
+        DelegationTokenManager tokenManager =
+          HttpFSServerWebApp.get().get(DelegationTokenManager.class);
+        UserGroupInformation ugi = tokenManager.verifyToken(dt);
+        final String shortName = ugi.getShortUserName();
+
+        // create an ephemeral token
+        token = new AuthenticationToken(shortName, ugi.getUserName(),
+                                        getType());
+        token.setExpires(0);
+      } catch (Throwable ex) {
+        throw new AuthenticationException("Could not verify DelegationToken, " +
+                                          ex.toString(), ex);
+      }
+    } else {
+      token = super.authenticate(request, response);
+    }
+    return token;
+  }
+
+
+}
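
A small sketch of the JSON bodies the handler writes for the GETDELEGATIONTOKEN and GETDELEGATIONTOKENS operations (derived from delegationTokenToJSON/delegationTokensToJSON above); the urlString values are placeholders and the parsing mirrors what the client-side authenticator does.

    import org.json.simple.JSONArray;
    import org.json.simple.JSONObject;
    import org.json.simple.parser.JSONParser;

    public class TokenJsonShapeSketch {
      public static void main(String[] args) throws Exception {
        // GETDELEGATIONTOKEN response shape: {"Token":{"urlString":"..."}}
        String single = "{\"Token\":{\"urlString\":\"ABCDEF\"}}";
        JSONObject json = (JSONObject) new JSONParser().parse(single);
        String tokenStr =
            (String) ((JSONObject) json.get("Token")).get("urlString");
        System.out.println(tokenStr);

        // GETDELEGATIONTOKENS response shape: {"Tokens":{"Token":[{"urlString":"..."}]}}
        String list = "{\"Tokens\":{\"Token\":[{\"urlString\":\"ABCDEF\"}]}}";
        JSONObject tokens = (JSONObject)
            ((JSONObject) new JSONParser().parse(list)).get("Tokens");
        JSONArray array = (JSONArray) tokens.get("Token");
        System.out.println(((JSONObject) array.get(0)).get("urlString"));
      }
    }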

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java

@@ -70,7 +70,7 @@ public class HttpFSServerWebApp extends ServerWebApp {
   /**
    * Constructor used for testing purposes.
    */
-  protected HttpFSServerWebApp(String homeDir, String configDir, String logDir,
+  public HttpFSServerWebApp(String homeDir, String configDir, String logDir,
                                String tempDir, Configuration config) {
     super(NAME, homeDir, configDir, logDir, tempDir, config);
   }

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java

@@ -39,7 +39,11 @@ public class ServerException extends XException {
     S08("Could not load service classes, {0}"),
     S09("Could not set service [{0}] programmatically -server shutting down-, {1}"),
     S10("Service [{0}] requires service [{1}]"),
-    S11("Service [{0}] exception during status change to [{1}] -server shutting down-, {2}");
+    S11("Service [{0}] exception during status change to [{1}] -server shutting down-, {2}"),
+    S12("Could not start service [{0}], {1}"),
+    S13("Missing system property [{0}]"),
+    S14("Could not initialize server, {0}")
+    ;
 
 
     private String msg;
 
 

+ 57 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenIdentifier.java

@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.lib.service;
+
+import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+
+/**
+ * HttpFS <code>DelegationTokenIdentifier</code> implementation.
+ */
+public class DelegationTokenIdentifier
+  extends AbstractDelegationTokenIdentifier {
+
+  public static final Text KIND_NAME =
+    new Text(HttpFSKerberosAuthenticator.TOKEN_KIND);
+
+  public DelegationTokenIdentifier() {
+  }
+
+  /**
+   * Create a new delegation token identifier
+   *
+   * @param owner the effective username of the token owner
+   * @param renewer the username of the renewer
+   * @param realUser the real username of the token owner
+   */
+  public DelegationTokenIdentifier(Text owner, Text renewer, Text realUser) {
+    super(owner, renewer, realUser);
+  }
+
+
+  /**
+   * Returns the kind, <code>TOKEN_KIND</code>.
+   * @return returns <code>TOKEN_KIND</code>.
+   */
+  @Override
+  public Text getKind() {
+    return KIND_NAME;
+  }
+
+}

+ 76 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManager.java

@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.lib.service;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+
+/**
+ * Service interface to manage HttpFS delegation tokens.
+ */
+public interface DelegationTokenManager {
+
+  /**
+   * Creates a delegation token.
+   *
+   * @param ugi UGI creating the token.
+   * @param renewer token renewer.
+   * @return new delegation token.
+   * @throws DelegationTokenManagerException thrown if the token could not be
+   * created.
+   */
+  public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
+                                                      String renewer)
+    throws DelegationTokenManagerException;
+
+  /**
+   * Renews a delegation token.
+   *
+   * @param token delegation token to renew.
+   * @param renewer token renewer.
+   * @return epoch expiration time.
+   * @throws DelegationTokenManagerException thrown if the token could not be
+   * renewed.
+   */
+  public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
+    throws DelegationTokenManagerException;
+
+  /**
+   * Cancels a delegation token.
+   *
+   * @param token delegation token to cancel.
+   * @param canceler token canceler.
+   * @throws DelegationTokenManagerException thrown if the token could not be
+   * canceled.
+   */
+  public void cancelToken(Token<DelegationTokenIdentifier> token,
+                          String canceler)
+    throws DelegationTokenManagerException;
+
+  /**
+   * Verifies a delegation token.
+   *
+   * @param token delegation token to verify.
+   * @return the UGI for the token.
+   * @throws DelegationTokenManagerException thrown if the token could not be
+   * verified.
+   */
+  public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier> token)
+    throws DelegationTokenManagerException;
+
+}
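
A hedged server-side sketch of how this interface is meant to be consumed; the HttpFSServerWebApp.get().get(...) lookup is the one used elsewhere in this change, while the "oozie" renewer is illustrative.

    import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
    import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
    import org.apache.hadoop.lib.service.DelegationTokenManager;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    public class DelegationTokenManagerSketch {
      public static void issueRenewCancel() throws Exception {
        // Look up the service from the running HttpFS server web app.
        DelegationTokenManager mgr =
            HttpFSServerWebApp.get().get(DelegationTokenManager.class);
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

        Token<DelegationTokenIdentifier> token = mgr.createToken(ugi, "oozie");
        long expiration = mgr.renewToken(token, "oozie");
        System.out.println("token valid until " + expiration);
        UserGroupInformation owner = mgr.verifyToken(token);
        mgr.cancelToken(token, owner.getUserName());
      }
    }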

+ 49 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManagerException.java

@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.lib.service;
+
+import org.apache.hadoop.lib.lang.XException;
+
+/**
+ * Exception thrown by the {@link DelegationTokenManager} service implementation.
+ */
+public class DelegationTokenManagerException extends XException {
+
+  public enum ERROR implements XException.ERROR {
+    DT01("Could not verify delegation token, {0}"),
+    DT02("Could not renew delegation token, {0}"),
+    DT03("Could not cancel delegation token, {0}"),
+    DT04("Could not create delegation token, {0}");
+
+    private String template;
+
+    ERROR(String template) {
+      this.template = template;
+    }
+
+    @Override
+    public String getTemplate() {
+      return template;
+    }
+  }
+
+  public DelegationTokenManagerException(ERROR error, Object... params) {
+    super(error, params);
+  }
+
+}

+ 231 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/DelegationTokenManagerService.java

@@ -0,0 +1,231 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.lib.service.security;
+
+import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.lib.server.BaseService;
+import org.apache.hadoop.lib.server.ServerException;
+import org.apache.hadoop.lib.server.ServiceException;
+import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
+import org.apache.hadoop.lib.service.DelegationTokenManager;
+import org.apache.hadoop.lib.service.DelegationTokenManagerException;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+
+/**
+ * DelegationTokenManager service implementation.
+ */
+public class DelegationTokenManagerService extends BaseService
+  implements DelegationTokenManager {
+
+  private static final String PREFIX = "delegation.token.manager";
+
+  private static final String UPDATE_INTERVAL = "update.interval";
+
+  private static final String MAX_LIFETIME = "max.lifetime";
+
+  private static final String RENEW_INTERVAL = "renew.interval";
+
+  private static final long HOUR = 60 * 60 * 1000;
+  private static final long DAY = 24 * HOUR;
+
+  DelegationTokenSecretManager secretManager = null;
+
+  public DelegationTokenManagerService() {
+    super(PREFIX);
+  }
+
+  /**
+   * Initializes the service.
+   *
+   * @throws ServiceException thrown if the service could not be initialized.
+   */
+  @Override
+  protected void init() throws ServiceException {
+
+    long updateInterval = getServiceConfig().getLong(UPDATE_INTERVAL, DAY);
+    long maxLifetime = getServiceConfig().getLong(MAX_LIFETIME, 7 * DAY);
+    long renewInterval = getServiceConfig().getLong(RENEW_INTERVAL, DAY);
+    secretManager = new DelegationTokenSecretManager(updateInterval,
+                                                     maxLifetime,
+                                                     renewInterval, HOUR);
+    try {
+      secretManager.startThreads();
+    } catch (IOException ex) {
+      throw new ServiceException(ServiceException.ERROR.S12,
+                                 DelegationTokenManager.class.getSimpleName(),
+                                 ex.toString(), ex);
+    }
+  }
+
+  /**
+   * Destroys the service.
+   */
+  @Override
+  public void destroy() {
+    secretManager.stopThreads();
+    super.destroy();
+  }
+
+  /**
+   * Returns the service interface.
+   *
+   * @return the service interface.
+   */
+  @Override
+  public Class getInterface() {
+    return DelegationTokenManager.class;
+  }
+
+  /**
+   * Creates a delegation token.
+   *
+   * @param ugi UGI creating the token.
+   * @param renewer token renewer.
+   * @return new delegation token.
+   * @throws DelegationTokenManagerException thrown if the token could not be
+   * created.
+   */
+  @Override
+  public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
+                                                      String renewer)
+    throws DelegationTokenManagerException {
+    renewer = (renewer == null) ? ugi.getShortUserName() : renewer;
+    String user = ugi.getUserName();
+    Text owner = new Text(user);
+    Text realUser = null;
+    if (ugi.getRealUser() != null) {
+      realUser = new Text(ugi.getRealUser().getUserName());
+    }
+    DelegationTokenIdentifier tokenIdentifier =
+      new DelegationTokenIdentifier(owner, new Text(renewer), realUser);
+    Token<DelegationTokenIdentifier> token =
+      new Token<DelegationTokenIdentifier>(tokenIdentifier, secretManager);
+    try {
+      SecurityUtil.setTokenService(token,
+                                   HttpFSServerWebApp.get().getAuthority());
+    } catch (ServerException ex) {
+      throw new DelegationTokenManagerException(
+        DelegationTokenManagerException.ERROR.DT04, ex.toString(), ex);
+    }
+    return token;
+  }
+
+  /**
+   * Renews a delegation token.
+   *
+   * @param token delegation token to renew.
+   * @param renewer token renewer.
+   * @return epoch expiration time.
+   * @throws DelegationTokenManagerException thrown if the token could not be
+   * renewed.
+   */
+  @Override
+  public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
+    throws DelegationTokenManagerException {
+    try {
+      return secretManager.renewToken(token, renewer);
+    } catch (IOException ex) {
+      throw new DelegationTokenManagerException(
+        DelegationTokenManagerException.ERROR.DT02, ex.toString(), ex);
+    }
+  }
+
+  /**
+   * Cancels a delegation token.
+   *
+   * @param token delegation token to cancel.
+   * @param canceler token canceler.
+   * @throws DelegationTokenManagerException thrown if the token could not be
+   * canceled.
+   */
+  @Override
+  public void cancelToken(Token<DelegationTokenIdentifier> token,
+                          String canceler)
+    throws DelegationTokenManagerException {
+    try {
+      secretManager.cancelToken(token, canceler);
+    } catch (IOException ex) {
+      throw new DelegationTokenManagerException(
+        DelegationTokenManagerException.ERROR.DT03, ex.toString(), ex);
+    }
+  }
+
+  /**
+   * Verifies a delegation token.
+   *
+   * @param token delegation token to verify.
+   * @return the UGI for the token.
+   * @throws DelegationTokenManagerException thrown if the token could not be
+   * verified.
+   */
+  @Override
+  public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier> token)
+    throws DelegationTokenManagerException {
+    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
+    DataInputStream dis = new DataInputStream(buf);
+    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
+    try {
+      id.readFields(dis);
+      dis.close();
+      secretManager.verifyToken(id, token.getPassword());
+    } catch (Exception ex) {
+      throw new DelegationTokenManagerException(
+        DelegationTokenManagerException.ERROR.DT01, ex.toString(), ex);
+    }
+    return id.getUser();
+  }
+
+  private static class DelegationTokenSecretManager
+    extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
+
+    /**
+     * Create a secret manager
+     *
+     * @param delegationKeyUpdateInterval the number of seconds for rolling new
+     * secret keys.
+     * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
+     * tokens
+     * @param delegationTokenRenewInterval how often the tokens must be renewed
+     * @param delegationTokenRemoverScanInterval how often the tokens are
+     * scanned
+     * for expired tokens
+     */
+    public DelegationTokenSecretManager(long delegationKeyUpdateInterval,
+                                        long delegationTokenMaxLifetime,
+                                        long delegationTokenRenewInterval,
+                                        long delegationTokenRemoverScanInterval) {
+      super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
+            delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
+    }
+
+    @Override
+    public DelegationTokenIdentifier createIdentifier() {
+      return new DelegationTokenIdentifier();
+    }
+
+  }
+
+}

+ 65 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java

@@ -18,12 +18,16 @@
 
 
 package org.apache.hadoop.lib.servlet;
 
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.lib.server.Server;
 import org.apache.hadoop.lib.server.ServerException;

 import javax.servlet.ServletContextEvent;
 import javax.servlet.ServletContextListener;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
 import java.text.MessageFormat;

 /**
@@ -36,9 +40,13 @@ public abstract class ServerWebApp extends Server implements ServletContextListe
   private static final String CONFIG_DIR = ".config.dir";
   private static final String LOG_DIR = ".log.dir";
   private static final String TEMP_DIR = ".temp.dir";
+  private static final String HTTP_HOSTNAME = ".http.hostname";
+  private static final String HTTP_PORT = ".http.port";
 
 
   private static ThreadLocal<String> HOME_DIR_TL = new ThreadLocal<String>();
 
 
+  private InetSocketAddress authority;
+
   /**
    * Method for testing purposes.
    */
@@ -146,6 +154,38 @@ public abstract class ServerWebApp extends Server implements ServletContextListe
     }
   }

+  /**
+   * Resolves the host & port InetSocketAddress the web server is listening to.
+   * <p/>
+   * This implementation looks for the following 2 properties:
+   * <ul>
+   *   <li>#SERVER_NAME#.http.hostname</li>
+   *   <li>#SERVER_NAME#.http.port</li>
+   * </ul>
+   *
+   * @return the host & port InetSocketAddress the web server is listening to.
+   * @throws ServerException thrown if any of the above 2 properties is not defined.
+   */
+  protected InetSocketAddress resolveAuthority() throws ServerException {
+    String hostnameKey = getName() + HTTP_HOSTNAME;
+    String portKey = getName() + HTTP_PORT;
+    String host = System.getProperty(hostnameKey);
+    String port = System.getProperty(portKey);
+    if (host == null) {
+      throw new ServerException(ServerException.ERROR.S13, hostnameKey);
+    }
+    if (port == null) {
+      throw new ServerException(ServerException.ERROR.S13, portKey);
+    }
+    try {
+      InetAddress add = InetAddress.getByName(host);
+      int portNum = Integer.parseInt(port);
+      return new InetSocketAddress(add, portNum);
+    } catch (UnknownHostException ex) {
+      throw new ServerException(ServerException.ERROR.S14, ex.toString(), ex);
+    }
+  }
+
   /**
    * Destroys the <code>ServletContextListener</code> which destroys
    * the Server.
@@ -156,4 +196,29 @@ public abstract class ServerWebApp extends Server implements ServletContextListe
     destroy();
   }

+  /**
+   * Returns the hostname:port InetSocketAddress the webserver is listening to.
+   *
+   * @return the hostname:port InetSocketAddress the webserver is listening to.
+   */
+  public InetSocketAddress getAuthority() throws ServerException {
+    synchronized (this) {
+      if (authority == null) {
+          authority = resolveAuthority();
+      }
+    }
+    return authority;
+  }
+
+  /**
+   * Sets an alternate hostname:port InetSocketAddress to use.
+   * <p/>
+   * For testing purposes.
+   * 
+   * @param authority alternate authority.
+   */
+  @VisibleForTesting
+  public void setAuthority(InetSocketAddress authority) {
+    this.authority = authority;
+  }
 }
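
A tiny sketch of the system properties resolveAuthority() expects; the "httpfs" prefix assumes the web app name is "httpfs", and the host/port values are placeholders (tests can instead inject an address via setAuthority, as the new handler test below does).

    import java.net.InetSocketAddress;

    public class AuthorityBootstrapSketch {
      public static void main(String[] args) {
        // resolveAuthority() reads "<server-name>.http.hostname" and
        // "<server-name>.http.port" from JVM system properties.
        System.setProperty("httpfs.http.hostname", "localhost");
        System.setProperty("httpfs.http.port", "14000");

        // Equivalent address a test could pass to ServerWebApp#setAuthority(...).
        InetSocketAddress authority = new InetSocketAddress("localhost", 14000);
        System.out.println(authority);
      }
    }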

+ 29 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml

@@ -35,6 +35,7 @@
       org.apache.hadoop.lib.service.scheduler.SchedulerService,
       org.apache.hadoop.lib.service.security.GroupsService,
       org.apache.hadoop.lib.service.security.ProxyUserService,
+      org.apache.hadoop.lib.service.security.DelegationTokenManagerService,
       org.apache.hadoop.lib.service.hadoop.FileSystemAccessService
     </value>
     <description>
@@ -88,12 +89,12 @@
     <description>
       Defines the authentication mechanism used by httpfs for its HTTP clients.

-      Valid values are 'simple' and 'kerberos'.
+      Valid values are 'simple' or 'kerberos'.
 
 
       If using 'simple' HTTP clients must specify the username with the
       'user.name' query string parameter.
 
 
-      If using 'kerberos' HTTP clients must use HTTP SPNEGO.
+      If using 'kerberos' HTTP clients must use HTTP SPNEGO or delegation tokens.
     </description>
   </property>

@@ -153,6 +154,32 @@
     </description>
   </property>

+  <!-- HttpFS Delegation Token configuration -->
+
+  <property>
+    <name>httpfs.delegation.token.manager.update.interval</name>
+    <value>86400</value>
+    <description>
+      HttpFS delegation token update interval, default 1 day, in seconds.
+    </description>
+  </property>
+
+  <property>
+    <name>httpfs.delegation.token.manager.max.lifetime</name>
+    <value>604800</value>
+    <description>
+      HttpFS delegation token maximum lifetime, default 7 days, in seconds.
+    </description>
+  </property>
+
+  <property>
+    <name>httpfs.delegation.token.manager.renewal.interval</name>
+    <value>86400</value>
+    <description>
+      HttpFS delegation token renewal interval, default 1 day, in seconds.
+    </description>
+  </property>
+
   <!-- FileSystemAccess Namenode Security Configuration -->

   <property>

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/webapp/WEB-INF/web.xml

@@ -47,7 +47,7 @@
 
 
   <filter>
     <filter-name>authFilter</filter-name>
-    <filter-class>org.apache.hadoop.fs.http.server.AuthFilter</filter-class>
+    <filter-class>org.apache.hadoop.fs.http.server.HttpFSAuthenticationFilter</filter-class>
   </filter>

   <filter>

+ 14 - 4
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java

@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.Writer;
+import java.net.URI;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
@@ -100,16 +101,24 @@ public class TestHttpFSFileSystem extends HFSTestCase {
     server.start();
   }

+  protected Class getFileSystemClass() {
+    return HttpFSFileSystem.class;
+  }
+
   protected FileSystem getHttpFileSystem() throws Exception {
     Configuration conf = new Configuration();
-    conf.set("fs.http.impl", HttpFSFileSystem.class.getName());
-    return FileSystem.get(TestJettyHelper.getJettyURL().toURI(), conf);
+    conf.set("fs.webhdfs.impl", getFileSystemClass().getName());
+    URI uri = new URI("webhdfs://" +
+                      TestJettyHelper.getJettyURL().toURI().getAuthority());
+    return FileSystem.get(uri, conf);
   }

   protected void testGet() throws Exception {
     FileSystem fs = getHttpFileSystem();
     Assert.assertNotNull(fs);
-    Assert.assertEquals(fs.getUri(), TestJettyHelper.getJettyURL().toURI());
+    URI uri = new URI("webhdfs://" +
+                      TestJettyHelper.getJettyURL().toURI().getAuthority());
+    Assert.assertEquals(fs.getUri(), uri);
     fs.close();
   }

@@ -474,8 +483,9 @@ public class TestHttpFSFileSystem extends HFSTestCase {
     for (int i = 0; i < Operation.values().length; i++) {
       ops[i] = new Object[]{Operation.values()[i]};
     }
+    //To test one or a subset of operations do:
+    //return Arrays.asList(new Object[][]{ new Object[]{Operation.OPEN}});
     return Arrays.asList(ops);
-//    return Arrays.asList(new Object[][]{ new Object[]{Operation.CREATE}});
   }

   private Operation operation;

+ 2 - 14
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java

@@ -36,20 +36,8 @@ public class TestWebhdfsFileSystem extends TestHttpFSFileSystem {
   }

   @Override
-  protected FileSystem getHttpFileSystem() throws Exception {
-    Configuration conf = new Configuration();
-    conf.set("fs.webhdfs.impl", WebHdfsFileSystem.class.getName());
-    URI uri = new URI("webhdfs://" + TestJettyHelper.getJettyURL().toURI().getAuthority());
-    return FileSystem.get(uri, conf);
-  }
-
-  @Override
-  protected void testGet() throws Exception {
-    FileSystem fs = getHttpFileSystem();
-    Assert.assertNotNull(fs);
-    URI uri = new URI("webhdfs://" + TestJettyHelper.getJettyURL().toURI().getAuthority());
-    Assert.assertEquals(fs.getUri(), uri);
-    fs.close();
+  protected Class getFileSystemClass() {
+    return WebHdfsFileSystem.class;
   }
   }

 }
+ 11 - 17
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpKerberosAuthenticator.java → hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java

@@ -15,27 +15,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+package org.apache.hadoop.fs.http.server;
 
 
-package org.apache.hadoop.fs.http.client;
+import javax.servlet.ServletException;
+import java.util.Properties;
 
 
+public class HttpFSKerberosAuthenticationHandlerForTesting
+  extends HttpFSKerberosAuthenticationHandler {
 
 
-import org.apache.hadoop.security.authentication.client.Authenticator;
-import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
-
-/**
- * A <code>KerberosAuthenticator</code> subclass that fallback to
- * {@link HttpPseudoAuthenticator}.
- */
-public class HttpKerberosAuthenticator extends KerberosAuthenticator {
+  @Override
+  public void init(Properties config) throws ServletException {
+    //NOP overwrite to avoid Kerberos initialization
+  }
 
 
-  /**
-   * Returns the fallback authenticator if the server does not use
-   * Kerberos SPNEGO HTTP authentication.
-   *
-   * @return a {@link HttpPseudoAuthenticator} instance.
-   */
   @Override
-  protected Authenticator getFallBackAuthenticator() {
-    return new HttpPseudoAuthenticator();
+  public void destroy() {
+    //NOP overwrite to avoid Kerberos initialization
   }
 }

+ 310 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java

@@ -0,0 +1,310 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http.server;
+
+import junit.framework.Assert;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
+import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator.DelegationTokenOperation;
+import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
+import org.apache.hadoop.lib.service.DelegationTokenManager;
+import org.apache.hadoop.lib.service.DelegationTokenManagerException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.HFSTestCase;
+import org.apache.hadoop.test.TestDir;
+import org.apache.hadoop.test.TestDirHelper;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.MediaType;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+
+public class TestHttpFSKerberosAuthenticationHandler extends HFSTestCase {
+
+  @Test
+  @TestDir
+  public void testManagementOperations() throws Exception {
+    String dir = TestDirHelper.getTestDir().getAbsolutePath();
+
+    Configuration httpfsConf = new Configuration(false);
+    HttpFSServerWebApp server =
+      new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
+    server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(), 
+                                              14000));
+    AuthenticationHandler handler =
+      new HttpFSKerberosAuthenticationHandlerForTesting();
+    try {
+      server.init();
+      handler.init(null);
+
+      testNonManagementOperation(handler);
+      testManagementOperationErrors(handler);
+      testGetToken(handler, false, null);
+      testGetToken(handler, true, null);
+      testGetToken(handler, false, "foo");
+      testGetToken(handler, true, "foo");
+      testCancelToken(handler);
+      testRenewToken(handler);
+
+    } finally {
+      if (handler != null) {
+        handler.destroy();
+      }
+    server.destroy();
+    }
+  }
+
+  private void testNonManagementOperation(AuthenticationHandler handler)
+    throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
+      thenReturn(null);
+    Assert.assertTrue(handler.managementOperation(null, request, null));
+    Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
+      thenReturn(HttpFSFileSystem.Operation.CREATE.toString());
+    Assert.assertTrue(handler.managementOperation(null, request, null));
+  }
+
+  private void testManagementOperationErrors(AuthenticationHandler handler)
+    throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
+      thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
+    Mockito.when(request.getMethod()).thenReturn("FOO");
+    Assert.assertFalse(handler.managementOperation(null, request, response));
+    Mockito.verify(response).sendError(
+      Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
+      Mockito.startsWith("Wrong HTTP method"));
+
+    Mockito.reset(response);
+    Mockito.when(request.getMethod()).
+      thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.getHttpMethod());
+    Assert.assertFalse(handler.managementOperation(null, request, response));
+    Mockito.verify(response).sendError(
+      Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),
+      Mockito.contains("requires SPNEGO"));
+  }
+
+  private void testGetToken(AuthenticationHandler handler, boolean tokens,
+                            String renewer)
+    throws Exception {
+    DelegationTokenOperation op =
+      (tokens) ? DelegationTokenOperation.GETDELEGATIONTOKENS
+               : DelegationTokenOperation.GETDELEGATIONTOKEN;
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
+      thenReturn(op.toString());
+    Mockito.when(request.getMethod()).
+      thenReturn(op.getHttpMethod());
+
+    AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
+    Mockito.when(token.getUserName()).thenReturn("user");
+    Assert.assertFalse(handler.managementOperation(null, request, response));
+    Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.RENEWER_PARAM)).
+      thenReturn(renewer);
+
+    Mockito.reset(response);
+    StringWriter writer = new StringWriter();
+    PrintWriter pwriter = new PrintWriter(writer);
+    Mockito.when(response.getWriter()).thenReturn(pwriter);
+    Assert.assertFalse(handler.managementOperation(token, request, response));
+    if (renewer == null) {
+      Mockito.verify(token).getUserName();
+    } else {
+      Mockito.verify(token, Mockito.never()).getUserName();
+    }
+    Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
+    Mockito.verify(response).setContentType(MediaType.APPLICATION_JSON);
+    pwriter.close();
+    String responseOutput = writer.toString();
+    String tokenLabel = (tokens)
+                        ? HttpFSKerberosAuthenticator.DELEGATION_TOKENS_JSON
+                        : HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON;
+    if (tokens) {
+      Assert.assertTrue(responseOutput.contains(tokenLabel));
+    } else {
+      Assert.assertTrue(responseOutput.contains(tokenLabel));
+    }
+    Assert.assertTrue(responseOutput.contains(
+      HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON));
+    JSONObject json = (JSONObject) new JSONParser().parse(responseOutput);
+    json = (JSONObject) json.get(tokenLabel);
+    String tokenStr;
+    if (tokens) {
+      json = (JSONObject) ((JSONArray)
+        json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON)).get(0);
+    }
+    tokenStr = (String)
+      json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
+    Token<DelegationTokenIdentifier> dt = new Token<DelegationTokenIdentifier>();
+    dt.decodeFromUrlString(tokenStr);
+    HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(dt);
+  }
+
+  private void testCancelToken(AuthenticationHandler handler)
+    throws Exception {
+    DelegationTokenOperation op =
+      DelegationTokenOperation.CANCELDELEGATIONTOKEN;
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
+      thenReturn(op.toString());
+    Mockito.when(request.getMethod()).
+      thenReturn(op.getHttpMethod());
+
+    Assert.assertFalse(handler.managementOperation(null, request, response));
+    Mockito.verify(response).sendError(
+      Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
+      Mockito.contains("requires the parameter [token]"));
+
+    Mockito.reset(response);
+    Token<DelegationTokenIdentifier> token =
+      HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
+        UserGroupInformation.getCurrentUser(), "foo");
+    Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM)).
+      thenReturn(token.encodeToUrlString());
+    Assert.assertFalse(handler.managementOperation(null, request, response));
+    Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
+    try {
+      HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(token);
+      Assert.fail();
+    }
+    catch (DelegationTokenManagerException ex) {
+      Assert.assertTrue(ex.toString().contains("DT01"));
+    }
+  }
+
+  private void testRenewToken(AuthenticationHandler handler)
+    throws Exception {
+    DelegationTokenOperation op =
+      DelegationTokenOperation.RENEWDELEGATIONTOKEN;
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
+      thenReturn(op.toString());
+    Mockito.when(request.getMethod()).
+      thenReturn(op.getHttpMethod());
+
+    Assert.assertFalse(handler.managementOperation(null, request, response));
+    Mockito.verify(response).sendError(
+      Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),
+      Mockito.contains("equires SPNEGO authentication established"));
+
+    Mockito.reset(response);
+    AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
+    Mockito.when(token.getUserName()).thenReturn("user");
+    Assert.assertFalse(handler.managementOperation(token, request, response));
+    Mockito.verify(response).sendError(
+      Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
+      Mockito.contains("requires the parameter [token]"));
+
+    Mockito.reset(response);
+    StringWriter writer = new StringWriter();
+    PrintWriter pwriter = new PrintWriter(writer);
+    Mockito.when(response.getWriter()).thenReturn(pwriter);
+    Token<DelegationTokenIdentifier> dToken =
+      HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
+        UserGroupInformation.getCurrentUser(), "user");
+    Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM)).
+      thenReturn(dToken.encodeToUrlString());
+    Assert.assertFalse(handler.managementOperation(token, request, response));
+    Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
+    pwriter.close();
+    Assert.assertTrue(writer.toString().contains("long"));
+    HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(dToken);
+  }
+
+  @Test
+  @TestDir
+  public void testAuthenticate() throws Exception {
+    String dir = TestDirHelper.getTestDir().getAbsolutePath();
+
+    Configuration httpfsConf = new Configuration(false);
+    HttpFSServerWebApp server =
+      new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
+    server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(),
+                                              14000));
+    AuthenticationHandler handler =
+      new HttpFSKerberosAuthenticationHandlerForTesting();
+    try {
+      server.init();
+      handler.init(null);
+
+      testValidDelegationToken(handler);
+      testInvalidDelegationToken(handler);
+    } finally {
+      if (handler != null) {
+        handler.destroy();
+      }
+    server.destroy();
+    }
+  }
+
+  private void testValidDelegationToken(AuthenticationHandler handler)
+    throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Token<DelegationTokenIdentifier> dToken =
+      HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
+        UserGroupInformation.getCurrentUser(), "user");
+    Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM)).
+      thenReturn(dToken.encodeToUrlString());
+
+    AuthenticationToken token = handler.authenticate(request, response);
+    Assert.assertEquals(UserGroupInformation.getCurrentUser().getShortUserName(),
+                        token.getUserName());
+    Assert.assertEquals(0, token.getExpires());
+    Assert.assertEquals(HttpFSKerberosAuthenticationHandler.TYPE,
+                        token.getType());
+    Assert.assertTrue(token.isExpired());
+  }
+
+  private void testInvalidDelegationToken(AuthenticationHandler handler)
+    throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM)).
+      thenReturn("invalid");
+
+    try {
+      handler.authenticate(request, response);
+      Assert.fail();
+    } catch (AuthenticationException ex) {
+      //NOP
+    } catch (Exception ex) {
+      Assert.fail();
+    }
+  }
+
+}

+ 111 - 18
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java

@@ -15,11 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.fs.http.server;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import org.junit.Assert;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -39,9 +37,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
 import org.apache.hadoop.lib.server.Service;
 import org.apache.hadoop.lib.server.ServiceException;
 import org.apache.hadoop.lib.service.Groups;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.authentication.util.Signer;
 import org.apache.hadoop.test.HFSTestCase;
 import org.apache.hadoop.test.HadoopUsersConfTestHelper;
 import org.apache.hadoop.test.TestDir;
@@ -50,6 +52,8 @@ import org.apache.hadoop.test.TestHdfs;
 import org.apache.hadoop.test.TestHdfsHelper;
 import org.apache.hadoop.test.TestJetty;
 import org.apache.hadoop.test.TestJettyHelper;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
 import org.junit.Test;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.webapp.WebAppContext;
@@ -103,11 +107,13 @@ public class TestHttpFSServer extends HFSTestCase {
     }
 
   }
-  private void createHttpFSServer() throws Exception {
+
+  private void createHttpFSServer(boolean addDelegationTokenAuthHandler)
+    throws Exception {
     File homeDir = TestDirHelper.getTestDir();
-    assertTrue(new File(homeDir, "conf").mkdir());
-    assertTrue(new File(homeDir, "log").mkdir());
-    assertTrue(new File(homeDir, "temp").mkdir());
+    Assert.assertTrue(new File(homeDir, "conf").mkdir());
+    Assert.assertTrue(new File(homeDir, "log").mkdir());
+    Assert.assertTrue(new File(homeDir, "temp").mkdir());
     HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
 
     File secretFile = new File(new File(homeDir, "conf"), "secret");
@@ -128,6 +134,10 @@ public class TestHttpFSServer extends HFSTestCase {
 
     //HTTPFS configuration
     conf = new Configuration(false);
+    if (addDelegationTokenAuthHandler) {
+     conf.set("httpfs.authentication.type",
+              HttpFSKerberosAuthenticationHandlerForTesting.class.getName());
+    }
     conf.set("httpfs.services.ext", MockGroups.class.getName());
     conf.set("httpfs.admin.group", HadoopUsersConfTestHelper.
       getHadoopUserGroups(HadoopUsersConfTestHelper.getHadoopUsers()[0])[0]);
@@ -147,6 +157,9 @@ public class TestHttpFSServer extends HFSTestCase {
     Server server = TestJettyHelper.getJettyServer();
     server.addHandler(context);
     server.start();
+    if (addDelegationTokenAuthHandler) {
+      HttpFSServerWebApp.get().setAuthority(TestJettyHelper.getAuthority());
+    }
   }
 
   @Test
@@ -154,28 +167,28 @@ public class TestHttpFSServer extends HFSTestCase {
   @TestJetty
   @TestHdfs
   public void instrumentation() throws Exception {
-    createHttpFSServer();
+    createHttpFSServer(false);
 
     URL url = new URL(TestJettyHelper.getJettyURL(),
                       MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody"));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
 
     url = new URL(TestJettyHelper.getJettyURL(),
                   MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
                                        HadoopUsersConfTestHelper.getHadoopUsers()[0]));
     conn = (HttpURLConnection) url.openConnection();
-    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
     BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
     String line = reader.readLine();
     reader.close();
-    assertTrue(line.contains("\"counters\":{"));
+    Assert.assertTrue(line.contains("\"counters\":{"));
 
     url = new URL(TestJettyHelper.getJettyURL(),
                   MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation",
                                        HadoopUsersConfTestHelper.getHadoopUsers()[0]));
     conn = (HttpURLConnection) url.openConnection();
-    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
   }
 
   @Test
@@ -183,13 +196,13 @@ public class TestHttpFSServer extends HFSTestCase {
   @TestJetty
   @TestHdfs
   public void testHdfsAccess() throws Exception {
-    createHttpFSServer();
+    createHttpFSServer(false);
 
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     URL url = new URL(TestJettyHelper.getJettyURL(),
                       MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus", user));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
     BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
     reader.readLine();
     reader.close();
@@ -200,7 +213,7 @@ public class TestHttpFSServer extends HFSTestCase {
   @TestJetty
   @TestHdfs
   public void testGlobFilter() throws Exception {
-    createHttpFSServer();
+    createHttpFSServer(false);
 
     FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
     fs.mkdirs(new Path("/tmp"));
@@ -210,7 +223,7 @@ public class TestHttpFSServer extends HFSTestCase {
     URL url = new URL(TestJettyHelper.getJettyURL(),
                       MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
     BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
     reader.readLine();
     reader.close();
@@ -221,7 +234,7 @@ public class TestHttpFSServer extends HFSTestCase {
   @TestJetty
   @TestHdfs
   public void testPutNoOperation() throws Exception {
-    createHttpFSServer();
+    createHttpFSServer(false);
 
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     URL url = new URL(TestJettyHelper.getJettyURL(),
@@ -230,7 +243,87 @@ public class TestHttpFSServer extends HFSTestCase {
     conn.setDoInput(true);
     conn.setDoOutput(true);
     conn.setRequestMethod("PUT");
-    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testDelegationTokenOperations() throws Exception {
+    createHttpFSServer(true);
+
+    URL url = new URL(TestJettyHelper.getJettyURL(),
+                      "/webhdfs/v1/?op=GETHOMEDIRECTORY");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
+                        conn.getResponseCode());
+
+
+    AuthenticationToken token =
+      new AuthenticationToken("u", "p",
+        HttpFSKerberosAuthenticationHandlerForTesting.TYPE);
+    token.setExpires(System.currentTimeMillis() + 100000000);
+    Signer signer = new Signer("secret".getBytes());
+    String tokenSigned = signer.sign(token.toString());
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  "/webhdfs/v1/?op=GETHOMEDIRECTORY");
+    conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestProperty("Cookie",
+                            AuthenticatedURL.AUTH_COOKIE  + "=" + tokenSigned);
+    Assert.assertEquals(HttpURLConnection.HTTP_OK,
+                        conn.getResponseCode());
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  "/webhdfs/v1/?op=GETDELEGATIONTOKEN");
+    conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestProperty("Cookie",
+                            AuthenticatedURL.AUTH_COOKIE  + "=" + tokenSigned);
+    Assert.assertEquals(HttpURLConnection.HTTP_OK,
+                        conn.getResponseCode());
+
+    JSONObject json = (JSONObject)
+      new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
+    json = (JSONObject)
+      json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
+    String tokenStr = (String)
+        json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
+    conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(HttpURLConnection.HTTP_OK,
+                        conn.getResponseCode());
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
+    conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
+                        conn.getResponseCode());
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
+    conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    conn.setRequestProperty("Cookie",
+                            AuthenticatedURL.AUTH_COOKIE  + "=" + tokenSigned);
+    Assert.assertEquals(HttpURLConnection.HTTP_OK,
+                        conn.getResponseCode());
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  "/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" + tokenStr);
+    conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    Assert.assertEquals(HttpURLConnection.HTTP_OK,
+                        conn.getResponseCode());
+
+    url = new URL(TestJettyHelper.getJettyURL(),
+                  "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
+    conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
+                        conn.getResponseCode());
   }
 
 }

+ 291 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java

@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.server;
+
+import junit.framework.Assert;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.DelegationTokenRenewer;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.HFSTestCase;
+import org.apache.hadoop.test.KerberosTestUtils;
+import org.apache.hadoop.test.TestDir;
+import org.apache.hadoop.test.TestDirHelper;
+import org.apache.hadoop.test.TestHdfs;
+import org.apache.hadoop.test.TestHdfsHelper;
+import org.apache.hadoop.test.TestJetty;
+import org.apache.hadoop.test.TestJettyHelper;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.junit.After;
+import org.junit.Test;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.webapp.WebAppContext;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.Writer;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.net.URL;
+import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.Callable;
+
+public class TestHttpFSWithKerberos extends HFSTestCase {
+
+  @After
+  public void resetUGI() {
+    Configuration conf = new Configuration();
+    UserGroupInformation.setConfiguration(conf);
+  }
+
+  private void createHttpFSServer() throws Exception {
+    File homeDir = TestDirHelper.getTestDir();
+    Assert.assertTrue(new File(homeDir, "conf").mkdir());
+    Assert.assertTrue(new File(homeDir, "log").mkdir());
+    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
+
+    File secretFile = new File(new File(homeDir, "conf"), "secret");
+    Writer w = new FileWriter(secretFile);
+    w.write("secret");
+    w.close();
+
+    //HDFS configuration
+    File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
+    hadoopConfDir.mkdirs();
+    String fsDefaultName = TestHdfsHelper.getHdfsConf()
+      .get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
+    Configuration conf = new Configuration(false);
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
+    File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
+    OutputStream os = new FileOutputStream(hdfsSite);
+    conf.writeXml(os);
+    os.close();
+
+    conf = new Configuration(false);
+    conf.set("httpfs.proxyuser.client.hosts", "*");
+    conf.set("httpfs.proxyuser.client.groups", "*");
+
+    conf.set("httpfs.authentication.type", "kerberos");
+
+    conf.set("httpfs.authentication.signature.secret.file",
+             secretFile.getAbsolutePath());
+    File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
+    os = new FileOutputStream(httpfsSite);
+    conf.writeXml(os);
+    os.close();
+
+    ClassLoader cl = Thread.currentThread().getContextClassLoader();
+    URL url = cl.getResource("webapp");
+    WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
+    Server server = TestJettyHelper.getJettyServer();
+    server.addHandler(context);
+    server.start();
+    HttpFSServerWebApp.get().setAuthority(TestJettyHelper.getAuthority());
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testValidHttpFSAccess() throws Exception {
+    createHttpFSServer();
+
+    KerberosTestUtils.doAsClient(new Callable<Void>() {
+      @Override
+      public Void call() throws Exception {
+        URL url = new URL(TestJettyHelper.getJettyURL(),
+                          "/webhdfs/v1/?op=GETHOMEDIRECTORY");
+        AuthenticatedURL aUrl = new AuthenticatedURL();
+        AuthenticatedURL.Token aToken = new AuthenticatedURL.Token();
+        HttpURLConnection conn = aUrl.openConnection(url, aToken);
+        Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+        return null;
+      }
+    });
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testInvalidadHttpFSAccess() throws Exception {
+    createHttpFSServer();
+
+    URL url = new URL(TestJettyHelper.getJettyURL(),
+                      "/webhdfs/v1/?op=GETHOMEDIRECTORY");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(conn.getResponseCode(),
+                        HttpURLConnection.HTTP_UNAUTHORIZED);
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testDelegationTokenHttpFSAccess() throws Exception {
+    createHttpFSServer();
+
+    KerberosTestUtils.doAsClient(new Callable<Void>() {
+      @Override
+      public Void call() throws Exception {
+        //get delegation token doing SPNEGO authentication
+        URL url = new URL(TestJettyHelper.getJettyURL(),
+                          "/webhdfs/v1/?op=GETDELEGATIONTOKEN");
+        AuthenticatedURL aUrl = new AuthenticatedURL();
+        AuthenticatedURL.Token aToken = new AuthenticatedURL.Token();
+        HttpURLConnection conn = aUrl.openConnection(url, aToken);
+        Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+        JSONObject json = (JSONObject) new JSONParser()
+          .parse(new InputStreamReader(conn.getInputStream()));
+        json =
+          (JSONObject) json
+            .get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
+        String tokenStr = (String) json
+          .get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
+
+        //access httpfs using the delegation token
+        url = new URL(TestJettyHelper.getJettyURL(),
+                      "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" +
+                      tokenStr);
+        conn = (HttpURLConnection) url.openConnection();
+        Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+
+        //try to renew the delegation token without SPNEGO credentials
+        url = new URL(TestJettyHelper.getJettyURL(),
+                      "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
+        conn = (HttpURLConnection) url.openConnection();
+        conn.setRequestMethod("PUT");
+        Assert.assertEquals(conn.getResponseCode(),
+                            HttpURLConnection.HTTP_UNAUTHORIZED);
+
+        //renew the delegation token with SPNEGO credentials
+        url = new URL(TestJettyHelper.getJettyURL(),
+                      "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
+        conn = aUrl.openConnection(url, aToken);
+        conn.setRequestMethod("PUT");
+        Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+
+        //cancel delegation token, no need for SPNEGO credentials
+        url = new URL(TestJettyHelper.getJettyURL(),
+                      "/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" +
+                      tokenStr);
+        conn = (HttpURLConnection) url.openConnection();
+        conn.setRequestMethod("PUT");
+        Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+
+        //try to access httpfs with the canceled delegation token
+        url = new URL(TestJettyHelper.getJettyURL(),
+                      "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" +
+                      tokenStr);
+        conn = (HttpURLConnection) url.openConnection();
+        Assert.assertEquals(conn.getResponseCode(),
+                            HttpURLConnection.HTTP_UNAUTHORIZED);
+        return null;
+      }
+    });
+  }
+
+  @SuppressWarnings("deprecation")
+  private void testDelegationTokenWithFS(Class fileSystemClass)
+    throws Exception {
+    createHttpFSServer();
+    Configuration conf = new Configuration();
+    conf.set("fs.webhdfs.impl", fileSystemClass.getName());
+    conf.set("fs.hdfs.impl.disable.cache", "true");
+    URI uri = new URI( "webhdfs://" +
+                       TestJettyHelper.getJettyURL().toURI().getAuthority());
+    FileSystem fs = FileSystem.get(uri, conf);
+    Token<?> token = fs.getDelegationToken("foo");
+    fs.close();
+    fs = FileSystem.get(uri, conf);
+    ((DelegationTokenRenewer.Renewable) fs).setDelegationToken(token);
+    fs.listStatus(new Path("/"));
+    fs.close();
+  }
+
+  private void testDelegationTokenWithinDoAs(
+    final Class fileSystemClass, boolean proxyUser) throws Exception {
+    Configuration conf = new Configuration();
+    conf.set("hadoop.security.authentication", "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+    UserGroupInformation.loginUserFromKeytab("client",
+                                             "/Users/tucu/tucu.keytab");
+    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
+    if (proxyUser) {
+      ugi = UserGroupInformation.createProxyUser("foo", ugi);
+    }
+    conf = new Configuration();
+    UserGroupInformation.setConfiguration(conf);
+    ugi.doAs(
+      new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          testDelegationTokenWithFS(fileSystemClass);
+          return null;
+        }
+      });
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testDelegationTokenWithHttpFSFileSystem() throws Exception {
+    testDelegationTokenWithinDoAs(HttpFSFileSystem.class, false);
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testDelegationTokenWithWebhdfsFileSystem() throws Exception {
+    testDelegationTokenWithinDoAs(WebHdfsFileSystem.class, false);
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testDelegationTokenWithHttpFSFileSystemProxyUser()
+    throws Exception {
+    testDelegationTokenWithinDoAs(HttpFSFileSystem.class, true);
+  }
+
+  // TODO: WebHdfsFilesystem does work with ProxyUser HDFS-3509
+  //    @Test
+  //    @TestDir
+  //    @TestJetty
+  //    @TestHdfs
+  //    public void testDelegationTokenWithWebhdfsFileSystemProxyUser()
+  //      throws Exception {
+  //      testDelegationTokenWithinDoAs(WebHdfsFileSystem.class, true);
+  //    }
+
+}

+ 83 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestDelegationTokenManagerService.java

@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.lib.service.security;
+
+import junit.framework.Assert;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
+import org.apache.hadoop.lib.server.Server;
+import org.apache.hadoop.lib.service.DelegationTokenManager;
+import org.apache.hadoop.lib.service.DelegationTokenManagerException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.HTestCase;
+import org.apache.hadoop.test.TestDir;
+import org.apache.hadoop.test.TestDirHelper;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Test;
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+
+public class TestDelegationTokenManagerService extends HTestCase {
+
+  @Test
+  @TestDir
+  public void service() throws Exception {
+    String dir = TestDirHelper.getTestDir().getAbsolutePath();
+    Configuration conf = new Configuration(false);
+    conf.set("server.services", StringUtils.join(",",
+      Arrays.asList(DelegationTokenManagerService.class.getName())));
+    Server server = new Server("server", dir, dir, dir, dir, conf);
+    server.init();
+    DelegationTokenManager tm = server.get(DelegationTokenManager.class);
+    Assert.assertNotNull(tm);
+    server.destroy();
+  }
+
+  @Test
+  @TestDir
+  @SuppressWarnings("unchecked")
+  public void tokens() throws Exception {
+    String dir = TestDirHelper.getTestDir().getAbsolutePath();
+    Configuration conf = new Configuration(false);
+    conf.set("server.services", StringUtils.join(",",
+      Arrays.asList(DelegationTokenManagerService.class.getName())));
+    HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, conf);
+    server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(), 14000));
+    server.init();
+    DelegationTokenManager tm = server.get(DelegationTokenManager.class);
+    Token token = tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
+    Assert.assertNotNull(token);
+    tm.verifyToken(token);
+    Assert.assertTrue(tm.renewToken(token, "foo") > System.currentTimeMillis());
+    tm.cancelToken(token, "foo");
+    try {
+      tm.verifyToken(token);
+      Assert.fail();
+    } catch (DelegationTokenManagerException ex) {
+      //NOP
+    } catch (Exception ex) {
+      Assert.fail();
+    }
+    server.destroy();
+  }
+
+}

+ 138 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/KerberosTestUtils.java

@@ -0,0 +1,138 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.test;
+
+import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosPrincipal;
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.Configuration;
+import javax.security.auth.login.LoginContext;
+
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+
+import java.io.File;
+import java.security.Principal;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+
+/**
+ * Test helper class for Java Kerberos setup.
+ */
+public class KerberosTestUtils {
+  private static final String PREFIX = "httpfs.test.";
+
+  public static final String REALM = PREFIX + "kerberos.realm";
+
+  public static final String CLIENT_PRINCIPAL =
+    PREFIX + "kerberos.client.principal";
+
+  public static final String SERVER_PRINCIPAL =
+    PREFIX + "kerberos.server.principal";
+
+  public static final String KEYTAB_FILE = PREFIX + "kerberos.keytab.file";
+
+  public static String getRealm() {
+    return System.getProperty(REALM, "LOCALHOST");
+  }
+
+  public static String getClientPrincipal() {
+    return System.getProperty(CLIENT_PRINCIPAL, "client") + "@" + getRealm();
+  }
+
+  public static String getServerPrincipal() {
+    return System.getProperty(SERVER_PRINCIPAL,
+                              "HTTP/localhost") + "@" + getRealm();
+  }
+
+  public static String getKeytabFile() {
+    String keytabFile =
+      new File(System.getProperty("user.home"),
+               System.getProperty("user.name") + ".keytab").toString();
+    return System.getProperty(KEYTAB_FILE, keytabFile);
+  }
+
+  private static class KerberosConfiguration extends Configuration {
+    private String principal;
+
+    public KerberosConfiguration(String principal) {
+      this.principal = principal;
+    }
+
+    @Override
+    public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
+      Map<String, String> options = new HashMap<String, String>();
+      options.put("keyTab", KerberosTestUtils.getKeytabFile());
+      options.put("principal", principal);
+      options.put("useKeyTab", "true");
+      options.put("storeKey", "true");
+      options.put("doNotPrompt", "true");
+      options.put("useTicketCache", "true");
+      options.put("renewTGT", "true");
+      options.put("refreshKrb5Config", "true");
+      options.put("isInitiator", "true");
+      String ticketCache = System.getenv("KRB5CCNAME");
+      if (ticketCache != null) {
+        options.put("ticketCache", ticketCache);
+      }
+      options.put("debug", "true");
+
+      return new AppConfigurationEntry[]{
+        new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
+                                  AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+                                  options),};
+    }
+  }
+
+  public static <T> T doAs(String principal, final Callable<T> callable)
+    throws Exception {
+    LoginContext loginContext = null;
+    try {
+      Set<Principal> principals = new HashSet<Principal>();
+      principals.add(
+        new KerberosPrincipal(KerberosTestUtils.getClientPrincipal()));
+      Subject subject = new Subject(false, principals, new HashSet<Object>(),
+                                    new HashSet<Object>());
+      loginContext = new LoginContext("", subject, null,
+                                      new KerberosConfiguration(principal));
+      loginContext.login();
+      subject = loginContext.getSubject();
+      return Subject.doAs(subject, new PrivilegedExceptionAction<T>() {
+        @Override
+        public T run() throws Exception {
+          return callable.call();
+        }
+      });
+    } catch (PrivilegedActionException ex) {
+      throw ex.getException();
+    } finally {
+      if (loginContext != null) {
+        loginContext.logout();
+      }
+    }
+  }
+
+  public static <T> T doAsClient(Callable<T> callable) throws Exception {
+    return doAs(getClientPrincipal(), callable);
+  }
+
+  public static <T> T doAsServer(Callable<T> callable) throws Exception {
+    return doAs(getServerPrincipal(), callable);
+  }
+
+}

+ 1 - 3
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java

@@ -73,7 +73,7 @@ public class TestDirHelper implements MethodRule {
         System.exit(-1);
       }
 
-      TEST_DIR_ROOT = new File(TEST_DIR_ROOT, "testdir").getAbsolutePath();
+      TEST_DIR_ROOT = new File(TEST_DIR_ROOT, "test-dir").getAbsolutePath();
       System.setProperty(TEST_DIR_PROP, TEST_DIR_ROOT);
 
       File dir = new File(TEST_DIR_ROOT);
@@ -83,8 +83,6 @@ public class TestDirHelper implements MethodRule {
         System.exit(-1);
       }
 
-      System.setProperty("test.circus", "true");
-
       System.out.println(">>> " + TEST_DIR_PROP + "        : " + System.getProperty(TEST_DIR_PROP));
     } catch (IOException ex) {
       throw new RuntimeException(ex);

+ 22 - 3
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java

@@ -18,9 +18,11 @@
 package org.apache.hadoop.test;
 
 import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.ServerSocket;
 import java.net.URL;
+import java.net.UnknownHostException;
 
 import org.junit.Test;
 import org.junit.rules.MethodRule;
@@ -65,9 +67,9 @@ public class TestJettyHelper implements MethodRule {
 
   private Server createJettyServer() {
     try {
-
-      String host = InetAddress.getLocalHost().getHostName();
-      ServerSocket ss = new ServerSocket(0);
+      InetAddress localhost = InetAddress.getByName("localhost");
+      String host = "localhost";
+      ServerSocket ss = new ServerSocket(0, 50, localhost);
       int port = ss.getLocalPort();
       ss.close();
       Server server = new Server(0);
@@ -79,6 +81,23 @@ public class TestJettyHelper implements MethodRule {
     }
   }
 
+  /**
+   * Returns the authority (hostname & port) used by the JettyServer.
+   *
+   * @return an <code>InetSocketAddress</code> with the corresponding authority.
+   */
+  public static InetSocketAddress getAuthority() {
+    Server server = getJettyServer();
+    try {
+      InetAddress add =
+        InetAddress.getByName(server.getConnectors()[0].getHost());
+      int port = server.getConnectors()[0].getPort();
+      return new InetSocketAddress(add, port);
+    } catch (UnknownHostException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
   /**
    * Returns a Jetty server ready to be configured and the started. This server
    * is only available when the test method has been annotated with

+ 28 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/krb5.conf

@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+[libdefaults]
+	default_realm = ${kerberos.realm}
+	udp_preference_limit = 1
+	extra_addresses = 127.0.0.1
+[realms]
+	${kerberos.realm} = {
+		admin_server = localhost:88
+		kdc = localhost:88
+	}
+[domain_realm]
+	localhost = ${kerberos.realm}

+ 36 - 3
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -109,8 +109,6 @@ Trunk (unreleased changes)
 
     HDFS-3630 Modify TestPersistBlocks to use both flush and hflush  (sanjay)
 
-    HDFS-3583. Convert remaining tests to Junit4. (Andrew Wang via atm)
-
   OPTIMIZATIONS
 
   BUG FIXES
@@ -201,6 +199,8 @@ Branch-2 ( Unreleased changes )
     HDFS-3518. Add a utility method HdfsUtils.isHealthy(uri) for checking if
     the given HDFS is healthy. (szetszwo)
 
+    HDFS-3113. httpfs does not support delegation tokens. (tucu)
+
   IMPROVEMENTS
 
     HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
@@ -305,7 +305,8 @@ Branch-2 ( Unreleased changes )
     HDFS-3613. GSet prints some INFO level values, which aren't
     really very useful to all (Andrew Wang via harsh)
 
-    HDFS-3611. NameNode prints unnecessary WARNs about edit log normally skipping a few bytes. (Colin Patrick McCabe via harsh)
+    HDFS-3611. NameNode prints unnecessary WARNs about edit log normally skipping 
+    a few bytes. (Colin Patrick McCabe via harsh)
 
     HDFS-3582. Hook daemon process exit for testing. (eli)
 
@@ -352,6 +353,13 @@ Branch-2 ( Unreleased changes )
     HDFS-1249. With fuse-dfs, chown which only has owner (or only group)
     argument fails with Input/output error. (Colin Patrick McCabe via eli)
 
+    HDFS-3583. Convert remaining tests to Junit4. (Andrew Wang via atm)
+
+    HDFS-3711. Manually convert remaining tests to JUnit4. (Andrew Wang via atm)
+
+    HDFS-3650. Use MutableQuantiles to provide latency histograms for various
+    operations. (Andrew Wang via atm)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log
@@ -363,6 +371,10 @@ Branch-2 ( Unreleased changes )
     HDFS-3110. Use directRead API to reduce the number of buffer copies in
     libhdfs (Henry Robinson via todd)
 
+    HDFS-3697. Enable fadvise readahead by default. (todd)
+
+    HDFS-3667.  Add retry support to WebHdfsFileSystem.  (szetszwo)
+
   BUG FIXES
 
     HDFS-3385. The last block of INodeFileUnderConstruction is not
@@ -518,6 +530,22 @@ Branch-2 ( Unreleased changes )
 
     HDFS-3690. BlockPlacementPolicyDefault incorrectly casts LOG. (eli)
 
+    HDFS-3597. SNN fails to start after DFS upgrade. (Andy Isaacson via todd)
+
+    HDFS-3608. fuse_dfs: detect changes in UID ticket cache. (Colin Patrick
+    McCabe via atm)
+
+    HDFS-3709. TestStartup tests still binding to the ephemeral port. (eli)
+
+    HDFS-3720. hdfs.h must get packaged. (Colin Patrick McCabe via atm)
+
+    HDFS-3626. Creating file with invalid path can corrupt edit log (todd)
+
+    HDFS-3679. fuse_dfs notrash option sets usetrash. (Conrad Meyer via suresh)
+
+    HDFS-3732. fuse_dfs: incorrect configuration value checked for connection
+    expiry timer period. (Colin Patrick McCabe via atm)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
@@ -1388,6 +1416,11 @@ Release 0.23.3 - UNRELEASED
     HDFS-3646. LeaseRenewer can hold reference to inactive DFSClient
     instances forever. (Kihwal Lee via daryn)
 
+    HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations
+    to get around a Java library bug causing OutOfMemoryError.  (szetszwo)
+
+    HDFS-3553. Hftp proxy tokens are broken (daryn)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt

@@ -87,6 +87,7 @@ include_directories(
     ${CMAKE_CURRENT_SOURCE_DIR}
     ${CMAKE_BINARY_DIR}
     ${JNI_INCLUDE_DIRS}
+    main/native
     main/native/libhdfs
 )
 

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/faultinject_framework.xml

@@ -332,13 +332,12 @@ package org.apache.hadoop.fs;
 
 import org.junit.Test;
 import org.junit.Before;
-import junit.framework.TestCase;
 
-public class DemoFiTest extends TestCase {
+public class DemoFiTest {
   public static final String BLOCK_RECEIVER_FAULT="hdfs.datanode.BlockReceiver";
   @Override
   @Before
-  public void setUp(){
+  public void setUp() {
     //Setting up the test's environment as required
   }
 

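The documentation snippet above is updated for the JUnit 3 to JUnit 4 migration tracked in the changelog (HDFS-3583, HDFS-3711): the TestCase superclass goes away and lifecycle hooks become annotations. A minimal sketch of the migrated shape, with the test method name invented for illustration:

import static org.junit.Assert.assertTrue;

import org.junit.Before;
import org.junit.Test;

public class DemoFiTest {
  public static final String BLOCK_RECEIVER_FAULT = "hdfs.datanode.BlockReceiver";

  @Before
  public void setUp() {
    // Set up the test's environment as required; no superclass call is needed.
  }

  @Test
  public void testFaultInjection() {
    // The fault-injection assertions would go here; this placeholder only
    // demonstrates the annotation-driven structure.
    assertTrue(true);
  }
}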
+ 6 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java

@@ -57,9 +57,9 @@ public abstract class ByteRangeInputStream extends FSInputStream {
       return url;
     }
 
-    protected abstract HttpURLConnection openConnection() throws IOException;
-
-    protected abstract HttpURLConnection openConnection(final long offset) throws IOException;
+    /** Connect to server with a data offset. */
+    protected abstract HttpURLConnection connect(final long offset,
+        final boolean resolved) throws IOException;
   }
 
   enum StreamStatus {
@@ -85,9 +85,6 @@ public abstract class ByteRangeInputStream extends FSInputStream {
     this.resolvedURL = r;
   }
   
-  protected abstract void checkResponseCode(final HttpURLConnection connection
-      ) throws IOException;
-  
   protected abstract URL getResolvedUrl(final HttpURLConnection connection
       ) throws IOException;
 
@@ -113,13 +110,10 @@ public abstract class ByteRangeInputStream extends FSInputStream {
   protected InputStream openInputStream() throws IOException {
     // Use the original url if no resolved url exists, eg. if
     // it's the first time a request is made.
-    final URLOpener opener =
-      (resolvedURL.getURL() == null) ? originalURL : resolvedURL;
-
-    final HttpURLConnection connection = opener.openConnection(startPos);
-    connection.connect();
-    checkResponseCode(connection);
+    final boolean resolved = resolvedURL.getURL() != null; 
+    final URLOpener opener = resolved? resolvedURL: originalURL;
 
+    final HttpURLConnection connection = opener.connect(startPos, resolved);
     final String cl = connection.getHeaderField(StreamFile.CONTENT_LENGTH);
     if (cl == null) {
       throw new IOException(StreamFile.CONTENT_LENGTH+" header is missing");

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -74,7 +74,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY = "dfs.datanode.balance.bandwidthPerSec";
   public static final long    DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
   public static final String  DFS_DATANODE_READAHEAD_BYTES_KEY = "dfs.datanode.readahead.bytes";
-  public static final long    DFS_DATANODE_READAHEAD_BYTES_DEFAULT = 0;
+  public static final long    DFS_DATANODE_READAHEAD_BYTES_DEFAULT = 4 * 1024 * 1024; // 4MB
   public static final String  DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY = "dfs.datanode.drop.cache.behind.writes";
   public static final boolean DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_DEFAULT = false;
   public static final String  DFS_DATANODE_SYNC_BEHIND_WRITES_KEY = "dfs.datanode.sync.behind.writes";
@@ -203,6 +203,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size"; 
   public static final String  DFS_CLIENT_RETRY_WINDOW_BASE= "dfs.client.retry.window.base";
   public static final String  DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";
+  public static final String  DFS_METRICS_PERCENTILES_INTERVALS_KEY = "dfs.metrics.percentiles.intervals";
   public static final String  DFS_DATANODE_HOST_NAME_KEY = "dfs.datanode.hostname";
   public static final String  DFS_NAMENODE_HOSTS_KEY = "dfs.namenode.hosts";
   public static final String  DFS_NAMENODE_HOSTS_EXCLUDE_KEY = "dfs.namenode.hosts.exclude";

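Two knobs change in this hunk: dfs.datanode.readahead.bytes now defaults to 4 MB (HDFS-3697), and dfs.metrics.percentiles.intervals is introduced for the new latency quantiles (HDFS-3650). A hedged sketch of how a Configuration might override them; the concrete values are illustrative:

import org.apache.hadoop.conf.Configuration;

public class ReadaheadAndPercentilesConfig {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Readahead is now on by default (4 MB); 0 restores the old behaviour.
    conf.setLong("dfs.datanode.readahead.bytes", 8L * 1024 * 1024);

    // Collect latency percentiles over 60s and 300s rolling windows;
    // leaving the key unset keeps percentile measurement off.
    conf.set("dfs.metrics.percentiles.intervals", "60,300");

    // The metrics classes below read the intervals back with getInts().
    int[] intervals = conf.getInts("dfs.metrics.percentiles.intervals");
    System.out.println("configured " + intervals.length + " percentile windows");
  }
}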
+ 8 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -76,6 +76,8 @@ import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
 
+import com.google.common.annotations.VisibleForTesting;
+
 
 /****************************************************************
  * DFSOutputStream creates files from a stream of bytes.
@@ -1210,7 +1212,8 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
   //
   // returns the list of targets, if any, that is being currently used.
   //
-  synchronized DatanodeInfo[] getPipeline() {
+  @VisibleForTesting
+  public synchronized DatanodeInfo[] getPipeline() {
     if (streamer == null) {
       return null;
     }
@@ -1752,11 +1755,13 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
     }
   }
 
-  void setArtificialSlowdown(long period) {
+  @VisibleForTesting
+  public void setArtificialSlowdown(long period) {
     artificialSlowdown = period;
   }
 
-  synchronized void setChunksPerPacket(int value) {
+  @VisibleForTesting
+  public synchronized void setChunksPerPacket(int value) {
     chunksPerPacket = Math.min(chunksPerPacket, value);
     packetSize = PacketHeader.PKT_HEADER_LEN +
                  (checksum.getBytesPerChecksum() + 

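The accessors above are widened to public only so tests can reach them, and Guava's @VisibleForTesting records that intent. A tiny illustration of the pattern on a made-up class (not part of the patch):

import com.google.common.annotations.VisibleForTesting;

public class PipelineHolder {
  private String[] pipeline = new String[0];

  // Exposed for unit tests only; production code should not depend on it.
  @VisibleForTesting
  public synchronized String[] getPipeline() {
    return pipeline.clone();
  }
}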
+ 12 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -56,6 +56,7 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -118,7 +119,7 @@ public class DFSUtil {
   
   /**
    * Whether the pathname is valid.  Currently prohibits relative paths, 
-   * and names which contain a ":" or "/" 
+   * names which contain a ":" or "//", or other non-canonical paths.
    */
   public static boolean isValidName(String src) {
     // Path must be absolute.
@@ -127,15 +128,22 @@ public class DFSUtil {
     }
       
     // Check for ".." "." ":" "/"
-    StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
-    while(tokens.hasMoreTokens()) {
-      String element = tokens.nextToken();
+    String[] components = StringUtils.split(src, '/');
+    for (int i = 0; i < components.length; i++) {
+      String element = components[i];
       if (element.equals("..") || 
           element.equals(".")  ||
           (element.indexOf(":") >= 0)  ||
           (element.indexOf("/") >= 0)) {
         return false;
       }
+      
+      // The string may start or end with a /, but not have
+      // "//" in the middle.
+      if (element.isEmpty() && i != components.length - 1 &&
+          i != 0) {
+        return false;
+      }
     }
     return true;
   }

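The rewritten isValidName splits on '/' and, besides rejecting ".", ".." and ":" components, now also rejects empty interior components, so paths containing "//" in the middle can no longer reach the edit log (HDFS-3626). A standalone sketch of the same rule, assuming Hadoop's StringUtils.split keeps empty components at the ends; class and method names are illustrative:

import org.apache.hadoop.util.StringUtils;

public class PathNameCheck {
  // Mirrors the patched rule: absolute path, no "." or ".." or ":" components,
  // and no empty component except at the very start or end.
  static boolean looksValid(String src) {
    if (!src.startsWith("/")) {
      return false;
    }
    String[] components = StringUtils.split(src, '/');
    for (int i = 0; i < components.length; i++) {
      String element = components[i];
      if (element.equals("..") || element.equals(".")
          || element.contains(":") || element.contains("/")) {
        return false;
      }
      if (element.isEmpty() && i != 0 && i != components.length - 1) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(looksValid("/a/b/c"));   // expected: true
    System.out.println(looksValid("/a//b"));    // expected: false (interior "//")
    System.out.println(looksValid("/a/../b"));  // expected: false ("..")
  }
}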
+ 11 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java

@@ -342,19 +342,28 @@ public class HftpFileSystem extends FileSystem
       super(url);
     }
 
-    @Override
     protected HttpURLConnection openConnection() throws IOException {
       return (HttpURLConnection)URLUtils.openConnection(url);
     }
 
     /** Use HTTP Range header for specifying offset. */
     @Override
-    protected HttpURLConnection openConnection(final long offset) throws IOException {
+    protected HttpURLConnection connect(final long offset,
+        final boolean resolved) throws IOException {
       final HttpURLConnection conn = openConnection();
       conn.setRequestMethod("GET");
       if (offset != 0L) {
         conn.setRequestProperty("Range", "bytes=" + offset + "-");
       }
+      conn.connect();
+
+      //Expects HTTP_OK or HTTP_PARTIAL response codes. 
+      final int code = conn.getResponseCode();
+      if (offset != 0L && code != HttpURLConnection.HTTP_PARTIAL) {
+        throw new IOException("HTTP_PARTIAL expected, received " + code);
+      } else if (offset == 0L && code != HttpURLConnection.HTTP_OK) {
+        throw new IOException("HTTP_OK expected, received " + code);
+      }
       return conn;
     }  
   }
@@ -368,22 +377,6 @@ public class HftpFileSystem extends FileSystem
       this(new RangeHeaderUrlOpener(url), new RangeHeaderUrlOpener(null));
     }
 
-    /** Expects HTTP_OK and HTTP_PARTIAL response codes. */
-    @Override
-    protected void checkResponseCode(final HttpURLConnection connection
-        ) throws IOException {
-      final int code = connection.getResponseCode();
-      if (startPos != 0 && code != HttpURLConnection.HTTP_PARTIAL) {
-        // We asked for a byte range but did not receive a partial content
-        // response...
-        throw new IOException("HTTP_PARTIAL expected, received " + code);
-      } else if (startPos == 0 && code != HttpURLConnection.HTTP_OK) {
-        // We asked for all bytes from the beginning but didn't receive a 200
-        // response (none of the other 2xx codes are valid here)
-        throw new IOException("HTTP_OK expected, received " + code);
-      }
-    }
-
     @Override
     protected URL getResolvedUrl(final HttpURLConnection connection) {
       return connection.getURL();

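With checkResponseCode folded into connect, the opener verifies the status immediately after connecting: 206 (HTTP_PARTIAL) for a ranged read, 200 (HTTP_OK) when reading from offset zero. A self-contained sketch of the same Range-request pattern against an arbitrary HTTP server; the URL is illustrative:

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class RangeRead {
  // Open a GET starting at the given byte offset and validate the status code,
  // mirroring RangeHeaderUrlOpener.connect() above.
  static HttpURLConnection openAt(URL url, long offset) throws IOException {
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    if (offset != 0L) {
      conn.setRequestProperty("Range", "bytes=" + offset + "-");
    }
    conn.connect();
    int code = conn.getResponseCode();
    if (offset != 0L && code != HttpURLConnection.HTTP_PARTIAL) {
      throw new IOException("HTTP_PARTIAL expected, received " + code);
    } else if (offset == 0L && code != HttpURLConnection.HTTP_OK) {
      throw new IOException("HTTP_OK expected, received " + code);
    }
    return conn;
  }

  public static void main(String[] args) throws IOException {
    InputStream in = openAt(new URL("http://example.org/file"), 1024).getInputStream();
    try {
      System.out.println("first byte after the offset: " + in.read());
    } finally {
      in.close();
    }
  }
}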
+ 9 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java

@@ -259,7 +259,7 @@ public class NameNodeProxies {
    *     
    * Note that dfs.client.retry.max < 0 is not allowed.
    */
-  private static RetryPolicy getDefaultRpcRetryPolicy(Configuration conf) {
+  public static RetryPolicy getDefaultRetryPolicy(Configuration conf) {
     final RetryPolicy multipleLinearRandomRetry = getMultipleLinearRandomRetry(conf);
     if (LOG.isDebugEnabled()) {
       LOG.debug("multipleLinearRandomRetry = " + multipleLinearRandomRetry);
@@ -300,6 +300,13 @@ public class NameNodeProxies {
               + p.getClass().getSimpleName() + ", exception=" + e);
           return p.shouldRetry(e, retries, failovers, isMethodIdempotent);
         }
+
+        @Override
+        public String toString() {
+          return "RetryPolicy[" + multipleLinearRandomRetry + ", "
+              + RetryPolicies.TRY_ONCE_THEN_FAIL.getClass().getSimpleName()
+              + "]";
+        }
       };
     }
   }
@@ -335,7 +342,7 @@ public class NameNodeProxies {
       boolean withRetries) throws IOException {
     RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);
 
-    final RetryPolicy defaultPolicy = getDefaultRpcRetryPolicy(conf);
+    final RetryPolicy defaultPolicy = getDefaultRetryPolicy(conf);
     final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
     ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
         ClientNamenodeProtocolPB.class, version, address, ugi, conf,

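Renaming getDefaultRpcRetryPolicy to the public getDefaultRetryPolicy lets WebHdfsFileSystem (later in this commit) reuse the same policy, and the added toString() feeds its retry log message. A hedged sketch of how a caller drives a RetryPolicy by hand; the concrete policy and the flaky operation are illustrative:

import java.io.IOException;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class RetryLoopSketch {
  // Fails twice, then succeeds, to exercise the retry loop below.
  static void doSomethingFlaky(int attempt) throws IOException {
    if (attempt < 2) {
      throw new IOException("transient failure on attempt " + attempt);
    }
    System.out.println("succeeded on attempt " + attempt);
  }

  public static void main(String[] args) throws Exception {
    RetryPolicy policy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        3, 100, TimeUnit.MILLISECONDS);

    for (int retry = 0; ; retry++) {
      try {
        doSomethingFlaky(retry);
        break;
      } catch (IOException ioe) {
        // Ask the policy whether to retry; the RetryAction carries the delay.
        RetryPolicy.RetryAction a = policy.shouldRetry(ioe, retry, 0, true);
        if (a.action != RetryPolicy.RetryAction.RetryDecision.RETRY) {
          throw ioe;
        }
        Thread.sleep(a.delayMillis);
      }
    }
  }
}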
+ 66 - 68
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java

@@ -487,12 +487,17 @@ public class JspHelper {
    */
   public static UserGroupInformation getDefaultWebUser(Configuration conf
                                                        ) throws IOException {
+    return UserGroupInformation.createRemoteUser(getDefaultWebUserName(conf));
+  }
+
+  private static String getDefaultWebUserName(Configuration conf
+      ) throws IOException {
     String user = conf.get(
         HADOOP_HTTP_STATIC_USER, DEFAULT_HADOOP_HTTP_STATIC_USER);
     if (user == null || user.length() == 0) {
       throw new IOException("Cannot determine UGI from request or conf");
     }
-    return UserGroupInformation.createRemoteUser(user);
+    return user;
   }
 
   private static InetSocketAddress getNNServiceAddress(ServletContext context,
@@ -538,65 +543,45 @@ public class JspHelper {
       HttpServletRequest request, Configuration conf,
       final AuthenticationMethod secureAuthMethod,
       final boolean tryUgiParameter) throws IOException {
-    final UserGroupInformation ugi;
+    UserGroupInformation ugi = null;
     final String usernameFromQuery = getUsernameFromQuery(request, tryUgiParameter);
     final String doAsUserFromQuery = request.getParameter(DoAsParam.NAME);
-
-    if(UserGroupInformation.isSecurityEnabled()) {
-      final String remoteUser = request.getRemoteUser();
-      String tokenString = request.getParameter(DELEGATION_PARAMETER_NAME);
+    final String remoteUser;
+   
+    if (UserGroupInformation.isSecurityEnabled()) {
+      remoteUser = request.getRemoteUser();
+      final String tokenString = request.getParameter(DELEGATION_PARAMETER_NAME);
       if (tokenString != null) {
-        Token<DelegationTokenIdentifier> token = 
-          new Token<DelegationTokenIdentifier>();
-        token.decodeFromUrlString(tokenString);
-        InetSocketAddress serviceAddress = getNNServiceAddress(context, request);
-        if (serviceAddress != null) {
-          SecurityUtil.setTokenService(token, serviceAddress);
-          token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
-        }
-        ByteArrayInputStream buf = new ByteArrayInputStream(token
-            .getIdentifier());
-        DataInputStream in = new DataInputStream(buf);
-        DelegationTokenIdentifier id = new DelegationTokenIdentifier();
-        id.readFields(in);
-        if (context != null) {
-          final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
-          if (nn != null) {
-            // Verify the token.
-            nn.getNamesystem().verifyToken(id, token.getPassword());
-          }
-        }
-        ugi = id.getUser();
-        if (ugi.getRealUser() == null) {
-          //non-proxy case
-          checkUsername(ugi.getShortUserName(), usernameFromQuery);
-          checkUsername(null, doAsUserFromQuery);
-        } else {
-          //proxy case
-          checkUsername(ugi.getRealUser().getShortUserName(), usernameFromQuery);
-          checkUsername(ugi.getShortUserName(), doAsUserFromQuery);
-          ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
-        }
-        ugi.addToken(token);
-        ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
-      } else {
-        if(remoteUser == null) {
-          throw new IOException("Security enabled but user not " +
-                                "authenticated by filter");
-        }
-        final UserGroupInformation realUgi = UserGroupInformation.createRemoteUser(remoteUser);
-        checkUsername(realUgi.getShortUserName(), usernameFromQuery);
+        // Token-based connections need only verify the effective user, and
+        // disallow proxying to different user.  Proxy authorization checks
+        // are not required since the checks apply to issuing a token.
+        ugi = getTokenUGI(context, request, tokenString, conf);
+        checkUsername(ugi.getShortUserName(), usernameFromQuery);
+        checkUsername(ugi.getShortUserName(), doAsUserFromQuery);
+      } else if (remoteUser == null) {
+        throw new IOException(
+            "Security enabled but user not authenticated by filter");
+      }
+    } else {
+      // Security's not on, pull from url or use default web user
+      remoteUser = (usernameFromQuery == null)
+          ? getDefaultWebUserName(conf) // not specified in request
+          : usernameFromQuery;
+    }
+
+    if (ugi == null) { // security is off, or there's no token
+      ugi = UserGroupInformation.createRemoteUser(remoteUser);
+      checkUsername(ugi.getShortUserName(), usernameFromQuery);
+      if (UserGroupInformation.isSecurityEnabled()) {
         // This is not necessarily true, could have been auth'ed by user-facing
         // filter
-        realUgi.setAuthenticationMethod(secureAuthMethod);
-        ugi = initUGI(realUgi, doAsUserFromQuery, request, true, conf);
+        ugi.setAuthenticationMethod(secureAuthMethod);
+      }
+      if (doAsUserFromQuery != null) {
+        // create and attempt to authorize a proxy user
+        ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi);
+        ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
       }
-    } else { // Security's not on, pull from url
-      final UserGroupInformation realUgi = usernameFromQuery == null?
-          getDefaultWebUser(conf) // not specified in request
-          : UserGroupInformation.createRemoteUser(usernameFromQuery);
-      realUgi.setAuthenticationMethod(AuthenticationMethod.SIMPLE);
-      ugi = initUGI(realUgi, doAsUserFromQuery, request, false, conf);
     }
     
     if(LOG.isDebugEnabled())
@@ -604,21 +589,34 @@ public class JspHelper {
     return ugi;
   }
 
-  private static UserGroupInformation initUGI(final UserGroupInformation realUgi,
-      final String doAsUserFromQuery, final HttpServletRequest request,
-      final boolean isSecurityEnabled, final Configuration conf
-      ) throws AuthorizationException {
-    final UserGroupInformation ugi;
-    if (doAsUserFromQuery == null) {
-      //non-proxy case
-      ugi = realUgi;
-    } else {
-      //proxy case
-      ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, realUgi);
-      ugi.setAuthenticationMethod(
-          isSecurityEnabled? AuthenticationMethod.PROXY: AuthenticationMethod.SIMPLE);
-      ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
+  private static UserGroupInformation getTokenUGI(ServletContext context,
+                                                  HttpServletRequest request,
+                                                  String tokenString,
+                                                  Configuration conf)
+                                                      throws IOException {
+    final Token<DelegationTokenIdentifier> token =
+        new Token<DelegationTokenIdentifier>();
+    token.decodeFromUrlString(tokenString);
+    InetSocketAddress serviceAddress = getNNServiceAddress(context, request);
+    if (serviceAddress != null) {
+      SecurityUtil.setTokenService(token, serviceAddress);
+      token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
+    }
+
+    ByteArrayInputStream buf =
+        new ByteArrayInputStream(token.getIdentifier());
+    DataInputStream in = new DataInputStream(buf);
+    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
+    id.readFields(in);
+    if (context != null) {
+      final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
+      if (nn != null) {
+        // Verify the token.
+        nn.getNamesystem().verifyToken(id, token.getPassword());
+      }
     }
+    UserGroupInformation ugi = id.getUser();
+    ugi.addToken(token);
     return ugi;
   }
 

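getTokenUGI concentrates the delegation-token path: decode the URL-safe token string, optionally rebind its service address, then read the embedded identifier to recover the user. A reduced sketch of just the decode-and-read step; the surrounding servlet plumbing is omitted and the class name is illustrative:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public class TokenDecodeSketch {
  // Decode a delegation token passed as a URL parameter and return its identifier;
  // id.getUser() then yields the UserGroupInformation, as in getTokenUGI above.
  static DelegationTokenIdentifier decode(String tokenString) throws IOException {
    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
    token.decodeFromUrlString(tokenString);

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(token.getIdentifier()));
    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    id.readFields(in);
    return id;
  }
}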
+ 7 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.ReadaheadPool;
 import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -486,11 +487,14 @@ class BlockSender implements java.io.Closeable {
         
         // no need to flush since we know out is not a buffered stream
         FileChannel fileCh = ((FileInputStream)blockIn).getChannel();
+        LongWritable waitTime = new LongWritable();
+        LongWritable transferTime = new LongWritable();
         sockOut.transferToFully(fileCh, blockInPosition, dataLen, 
-            datanode.metrics.getSendDataPacketBlockedOnNetworkNanos(),
-            datanode.metrics.getSendDataPacketTransferNanos());
+            waitTime, transferTime);
+        datanode.metrics.addSendDataPacketBlockedOnNetworkNanos(waitTime.get());
+        datanode.metrics.addSendDataPacketTransferNanos(transferTime.get());
         blockInPosition += dataLen;
-      } else { 
+      } else {
         // normal transfer
         out.write(buf, 0, dataOff + dataLen);
       }

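transferToFully now reports its blocked-on-network and transfer durations through two LongWritable out-parameters, and the caller feeds those into the new add* metric methods. A small illustration of the out-parameter pattern with a stubbed transfer (everything here is invented for the example):

import org.apache.hadoop.io.LongWritable;

public class OutParamSketch {
  // Stub "transfer" that reports how long it waited and how long it moved data.
  static void transfer(LongWritable waitTime, LongWritable transferTime) {
    long start = System.nanoTime();
    // ... wait until the destination is writable ...
    long afterWait = System.nanoTime();
    // ... write the bytes ...
    long end = System.nanoTime();
    waitTime.set(afterWait - start);
    transferTime.set(end - afterWait);
  }

  public static void main(String[] args) {
    LongWritable waitTime = new LongWritable();
    LongWritable transferTime = new LongWritable();
    transfer(waitTime, transferTime);
    // BlockSender would now call metrics.addSendDataPacketBlockedOnNetworkNanos(waitTime.get())
    // and metrics.addSendDataPacketTransferNanos(transferTime.get()).
    System.out.println(waitTime.get() + " ns blocked, " + transferTime.get() + " ns transferring");
  }
}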
+ 68 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 
@@ -74,19 +75,54 @@ public class DataNodeMetrics {
   @Metric MutableRate heartbeats;
   @Metric MutableRate blockReports;
   @Metric MutableRate packetAckRoundTripTimeNanos;
-
+  MutableQuantiles[] packetAckRoundTripTimeNanosQuantiles;
+  
   @Metric MutableRate flushNanos;
+  MutableQuantiles[] flushNanosQuantiles;
+  
   @Metric MutableRate fsyncNanos;
+  MutableQuantiles[] fsyncNanosQuantiles;
   
   @Metric MutableRate sendDataPacketBlockedOnNetworkNanos;
+  MutableQuantiles[] sendDataPacketBlockedOnNetworkNanosQuantiles;
   @Metric MutableRate sendDataPacketTransferNanos;
+  MutableQuantiles[] sendDataPacketTransferNanosQuantiles;
+  
 
   final MetricsRegistry registry = new MetricsRegistry("datanode");
   final String name;
 
-  public DataNodeMetrics(String name, String sessionId) {
+  public DataNodeMetrics(String name, String sessionId, int[] intervals) {
     this.name = name;
     registry.tag(SessionId, sessionId);
+    
+    final int len = intervals.length;
+    packetAckRoundTripTimeNanosQuantiles = new MutableQuantiles[len];
+    flushNanosQuantiles = new MutableQuantiles[len];
+    fsyncNanosQuantiles = new MutableQuantiles[len];
+    sendDataPacketBlockedOnNetworkNanosQuantiles = new MutableQuantiles[len];
+    sendDataPacketTransferNanosQuantiles = new MutableQuantiles[len];
+    
+    for (int i = 0; i < len; i++) {
+      int interval = intervals[i];
+      packetAckRoundTripTimeNanosQuantiles[i] = registry.newQuantiles(
+          "packetAckRoundTripTimeNanos" + interval + "s",
+          "Packet Ack RTT in ns", "ops", "latency", interval);
+      flushNanosQuantiles[i] = registry.newQuantiles(
+          "flushNanos" + interval + "s", 
+          "Disk flush latency in ns", "ops", "latency", interval);
+      fsyncNanosQuantiles[i] = registry.newQuantiles(
+          "fsyncNanos" + interval + "s", "Disk fsync latency in ns", 
+          "ops", "latency", interval);
+      sendDataPacketBlockedOnNetworkNanosQuantiles[i] = registry.newQuantiles(
+          "sendDataPacketBlockedOnNetworkNanos" + interval + "s", 
+          "Time blocked on network while sending a packet in ns",
+          "ops", "latency", interval);
+      sendDataPacketTransferNanosQuantiles[i] = registry.newQuantiles(
+          "sendDataPacketTransferNanos" + interval + "s", 
+          "Time reading from disk and writing to network while sending " +
+          "a packet in ns", "ops", "latency", interval);
+    }
   }
 
   public static DataNodeMetrics create(Configuration conf, String dnName) {
@@ -94,8 +130,15 @@ public class DataNodeMetrics {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     JvmMetrics.create("DataNode", sessionId, ms);
     String name = "DataNodeActivity-"+ (dnName.isEmpty()
-        ? "UndefinedDataNodeName"+ DFSUtil.getRandom().nextInt() : dnName.replace(':', '-'));
-    return ms.register(name, null, new DataNodeMetrics(name, sessionId));
+        ? "UndefinedDataNodeName"+ DFSUtil.getRandom().nextInt() 
+            : dnName.replace(':', '-'));
+
+    // Percentile measurement is off by default, by watching no intervals
+    int[] intervals = 
+        conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
+    
+    return ms.register(name, null, new DataNodeMetrics(name, sessionId,
+        intervals));
   }
 
   public String name() { return name; }
@@ -166,14 +209,23 @@ public class DataNodeMetrics {
 
   public void addPacketAckRoundTripTimeNanos(long latencyNanos) {
     packetAckRoundTripTimeNanos.add(latencyNanos);
+    for (MutableQuantiles q : packetAckRoundTripTimeNanosQuantiles) {
+      q.add(latencyNanos);
+    }
   }
 
   public void addFlushNanos(long latencyNanos) {
     flushNanos.add(latencyNanos);
+    for (MutableQuantiles q : flushNanosQuantiles) {
+      q.add(latencyNanos);
+    }
   }
 
   public void addFsyncNanos(long latencyNanos) {
     fsyncNanos.add(latencyNanos);
+    for (MutableQuantiles q : fsyncNanosQuantiles) {
+      q.add(latencyNanos);
+    }
   }
 
   public void shutdown() {
@@ -196,12 +248,18 @@ public class DataNodeMetrics {
   public void incrBlocksGetLocalPathInfo() {
     blocksGetLocalPathInfo.incr();
   }
-  
-  public MutableRate getSendDataPacketBlockedOnNetworkNanos() {
-    return sendDataPacketBlockedOnNetworkNanos;
+
+  public void addSendDataPacketBlockedOnNetworkNanos(long latencyNanos) {
+    sendDataPacketBlockedOnNetworkNanos.add(latencyNanos);
+    for (MutableQuantiles q : sendDataPacketBlockedOnNetworkNanosQuantiles) {
+      q.add(latencyNanos);
+    }
   }
-  
-  public MutableRate getSendDataPacketTransferNanos() {
-    return sendDataPacketTransferNanos;
+
+  public void addSendDataPacketTransferNanos(long latencyNanos) {
+    sendDataPacketTransferNanos.add(latencyNanos);
+    for (MutableQuantiles q : sendDataPacketTransferNanosQuantiles) {
+      q.add(latencyNanos);
+    }
   }
 }

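The pattern in DataNodeMetrics is one MutableQuantiles per configured interval alongside each MutableRate, with every sample added to both. A hedged, standalone sketch of the same registration and feeding outside the DataNode; registry name, metric names, and intervals are illustrative:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.lib.MutableRate;

public class QuantilesSketch {
  public static void main(String[] args) {
    MetricsRegistry registry = new MetricsRegistry("example");
    MutableRate flushNanos = registry.newRate("flushNanos");

    // One quantiles estimator per rolling window, as the constructor above does.
    int[] intervals = {60, 300};
    MutableQuantiles[] flushNanosQuantiles = new MutableQuantiles[intervals.length];
    for (int i = 0; i < intervals.length; i++) {
      flushNanosQuantiles[i] = registry.newQuantiles(
          "flushNanos" + intervals[i] + "s",
          "Disk flush latency in ns", "ops", "latency", intervals[i]);
    }

    // Every observed latency feeds the rate and each quantiles window.
    long latencyNanos = 1000000L;
    flushNanos.add(latencyNanos);
    for (MutableQuantiles q : flushNanosQuantiles) {
      q.add(latencyNanos);
    }
  }
}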
+ 12 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java

@@ -113,12 +113,19 @@ public class CheckpointSignature extends StorageInfo
        + blockpoolID ;
   }
 
+  boolean storageVersionMatches(StorageInfo si) throws IOException {
+    return (layoutVersion == si.layoutVersion) && (cTime == si.cTime);
+  }
+
+  boolean isSameCluster(FSImage si) {
+    return namespaceID == si.getStorage().namespaceID &&
+      clusterID.equals(si.getClusterID()) &&
+      blockpoolID.equals(si.getBlockPoolID());
+  }
+
   void validateStorageInfo(FSImage si) throws IOException {
-    if(layoutVersion != si.getStorage().layoutVersion
-       || namespaceID != si.getStorage().namespaceID 
-       || cTime != si.getStorage().cTime
-       || !clusterID.equals(si.getClusterID())
-       || !blockpoolID.equals(si.getBlockPoolID())) {
+    if (!isSameCluster(si)
+        || !storageVersionMatches(si.getStorage())) {
       throw new IOException("Inconsistent checkpoint fields.\n"
           + "LV = " + layoutVersion + " namespaceID = " + namespaceID
           + " cTime = " + cTime

+ 9 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -230,8 +230,15 @@ public class FSDirectory implements Closeable {
 
     // Always do an implicit mkdirs for parent directory tree.
     long modTime = now();
-    if (!mkdirs(new Path(path).getParent().toString(), permissions, true,
-        modTime)) {
+    
+    Path parent = new Path(path).getParent();
+    if (parent == null) {
+      // Trying to add "/" as a file - this path has no
+      // parent -- avoids an NPE below.
+      return null;
+    }
+    
+    if (!mkdirs(parent.toString(), permissions, true, modTime)) {
       return null;
     }
     INodeFileUnderConstruction newNode = new INodeFileUnderConstruction(

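The new guard covers the degenerate request to create "/" itself: Path.getParent() returns null at the root, which previously flowed straight into mkdirs and an NPE. A two-line illustration of that Path behaviour:

import org.apache.hadoop.fs.Path;

public class RootParentSketch {
  public static void main(String[] args) {
    System.out.println(new Path("/a/b").getParent()); // prints /a
    System.out.println(new Path("/").getParent());    // prints null -- the case guarded above
  }
}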
+ 7 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -437,18 +437,16 @@ public class SecondaryNameNode implements Runnable {
     // Returns a token that would be used to upload the merged image.
     CheckpointSignature sig = namenode.rollEditLog();
     
-    // Make sure we're talking to the same NN!
-    if (checkpointImage.getNamespaceID() != 0) {
-      // If the image actually has some data, make sure we're talking
-      // to the same NN as we did before.
-      sig.validateStorageInfo(checkpointImage);
-    } else {
-      // if we're a fresh 2NN, just take the storage info from the server
-      // we first talk to.
+    if ((checkpointImage.getNamespaceID() == 0) ||
+        (sig.isSameCluster(checkpointImage) &&
+         !sig.storageVersionMatches(checkpointImage.getStorage()))) {
+      // if we're a fresh 2NN, or if we're on the same cluster and our storage
+      // needs an upgrade, just take the storage info from the server.
       dstStorage.setStorageInfo(sig);
       dstStorage.setClusterID(sig.getClusterID());
       dstStorage.setBlockPoolID(sig.getBlockpoolID());
     }
+    sig.validateStorageInfo(checkpointImage);
 
     // error simulation code for junit test
     CheckpointFaultInjector.getInstance().afterSecondaryCallsRollEditLog();
@@ -703,7 +701,7 @@ public class SecondaryNameNode implements Runnable {
     /**
      * Analyze checkpoint directories.
      * Create directories if they do not exist.
-     * Recover from an unsuccessful checkpoint is necessary.
+     * Recover from an unsuccessful checkpoint if necessary.
      *
      * @throws IOException
      */

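The reworked checkpoint handshake adopts the NameNode's storage info when the 2NN is brand new, or when it is the same cluster but the local storage needs a version upgrade, and only then validates the signature; this is the HDFS-3597 fix. A hedged sketch of just that decision expressed over plain booleans (the helper is invented for illustration):

public class CheckpointDecisionSketch {
  // When should the 2NN take the storage info offered by the active NameNode?
  static boolean shouldAdoptStorageInfo(int localNamespaceId,
      boolean sameCluster, boolean storageVersionMatches) {
    // Fresh 2NN, or same cluster whose on-disk layout needs an upgrade.
    return localNamespaceId == 0 || (sameCluster && !storageVersionMatches);
  }

  public static void main(String[] args) {
    System.out.println(shouldAdoptStorageInfo(0, false, false));  // fresh 2NN: true
    System.out.println(shouldAdoptStorageInfo(42, true, false));  // NN upgraded under us: true
    System.out.println(shouldAdoptStorageInfo(42, true, true));   // nothing to adopt: false
  }
}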
+ 33 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java

@@ -17,17 +17,20 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.metrics;
 
+import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
+import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
-import static org.apache.hadoop.metrics2.impl.MsInfo.*;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 
@@ -57,15 +60,31 @@ public class NameNodeMetrics {
 
   @Metric("Journal transactions") MutableRate transactions;
   @Metric("Journal syncs") MutableRate syncs;
+  MutableQuantiles[] syncsQuantiles;
   @Metric("Journal transactions batched in sync")
   MutableCounterLong transactionsBatchedInSync;
   @Metric("Block report") MutableRate blockReport;
+  MutableQuantiles[] blockReportQuantiles;
 
   @Metric("Duration in SafeMode at startup") MutableGaugeInt safeModeTime;
   @Metric("Time loading FS Image at startup") MutableGaugeInt fsImageLoadTime;
 
-  NameNodeMetrics(String processName, String sessionId) {
+  NameNodeMetrics(String processName, String sessionId, int[] intervals) {
     registry.tag(ProcessName, processName).tag(SessionId, sessionId);
+    
+    final int len = intervals.length;
+    syncsQuantiles = new MutableQuantiles[len];
+    blockReportQuantiles = new MutableQuantiles[len];
+    
+    for (int i = 0; i < len; i++) {
+      int interval = intervals[i];
+      syncsQuantiles[i] = registry.newQuantiles(
+          "syncs" + interval + "s",
+          "Journal syncs", "ops", "latency", interval);
+      blockReportQuantiles[i] = registry.newQuantiles(
+          "blockReport" + interval + "s", 
+          "Block report", "ops", "latency", interval);
+    }
   }
 
   public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
@@ -73,7 +92,11 @@ public class NameNodeMetrics {
     String processName = r.toString();
     MetricsSystem ms = DefaultMetricsSystem.instance();
     JvmMetrics.create(processName, sessionId, ms);
-    return ms.register(new NameNodeMetrics(processName, sessionId));
+    
+    // Percentile measurement is off by default, by watching no intervals
+    int[] intervals = 
+        conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
+    return ms.register(new NameNodeMetrics(processName, sessionId, intervals));
   }
 
   public void shutdown() {
@@ -146,6 +169,9 @@ public class NameNodeMetrics {
 
   public void addSync(long elapsed) {
     syncs.add(elapsed);
+    for (MutableQuantiles q : syncsQuantiles) {
+      q.add(elapsed);
+    }
   }
 
   public void setFsImageLoadTime(long elapsed) {
@@ -154,6 +180,9 @@ public class NameNodeMetrics {
 
   public void addBlockReport(long latency) {
     blockReport.add(latency);
+    for (MutableQuantiles q : blockReportQuantiles) {
+      q.add(latency);
+    }
   }
 
   public void setSafeModeTime(long elapsed) {

+ 212 - 120
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.ByteRangeInputStream;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
@@ -88,6 +89,7 @@ import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
@@ -147,6 +149,7 @@ public class WebHdfsFileSystem extends FileSystem
   private URI uri;
   private Token<?> delegationToken;
   private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
+  private RetryPolicy retryPolicy = null;
   private Path workingDir;
 
   {
@@ -179,6 +182,7 @@ public class WebHdfsFileSystem extends FileSystem
       throw new IllegalArgumentException(e);
     }
     this.nnAddr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
+    this.retryPolicy = NameNodeProxies.getDefaultRetryPolicy(conf);
    this.workingDir = getHomeDirectory();
 
     if (UserGroupInformation.isSecurityEnabled()) {
@@ -276,13 +280,13 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
-      final HttpURLConnection conn) throws IOException {
+      final HttpURLConnection conn, boolean unwrapException) throws IOException {
     final int code = conn.getResponseCode();
     if (code != op.getExpectedHttpResponseCode()) {
       final Map<?, ?> m;
       try {
         m = jsonParse(conn, true);
-      } catch(IOException e) {
+      } catch(Exception e) {
         throw new IOException("Unexpected HTTP response: code=" + code + " != "
             + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
             + ", message=" + conn.getResponseMessage(), e);
@@ -293,21 +297,42 @@ public class WebHdfsFileSystem extends FileSystem
       }
 
       final RemoteException re = JsonUtil.toRemoteException(m);
-      throw re.unwrapRemoteException(AccessControlException.class,
-          InvalidToken.class,
-          AuthenticationException.class,
-          AuthorizationException.class,
-          FileAlreadyExistsException.class,
-          FileNotFoundException.class,
-          ParentNotDirectoryException.class,
-          UnresolvedPathException.class,
-          SafeModeException.class,
-          DSQuotaExceededException.class,
-          NSQuotaExceededException.class);
+      throw unwrapException? toIOException(re): re;
     }
     }
     return null;
   }
 
+   * Covert an exception to an IOException.
+   * 
+   * For a non-IOException, wrap it with IOException.
+   * For a RemoteException, unwrap it.
+   * For an IOException which is not a RemoteException, return it. 
+   */
+  private static IOException toIOException(Exception e) {
+    if (!(e instanceof IOException)) {
+      return new IOException(e);
+    }
+
+    final IOException ioe = (IOException)e;
+    if (!(ioe instanceof RemoteException)) {
+      return ioe;
+    }
+
+    final RemoteException re = (RemoteException)ioe;
+    return re.unwrapRemoteException(AccessControlException.class,
+        InvalidToken.class,
+        AuthenticationException.class,
+        AuthorizationException.class,
+        FileAlreadyExistsException.class,
+        FileNotFoundException.class,
+        ParentNotDirectoryException.class,
+        UnresolvedPathException.class,
+        SafeModeException.class,
+        DSQuotaExceededException.class,
+        NSQuotaExceededException.class);
+  }
+
   /**
    * Return a URL pointing to given path on the namenode.
    *
@@ -362,67 +387,13 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   private HttpURLConnection getHttpUrlConnection(URL url)
-      throws IOException {
+      throws IOException, AuthenticationException {
     final HttpURLConnection conn;
-    try {
-      if (ugi.hasKerberosCredentials()) { 
-        conn = new AuthenticatedURL(AUTH).openConnection(url, authToken);
-      } else {
-        conn = (HttpURLConnection)url.openConnection();
-      }
-    } catch (AuthenticationException e) {
-      throw new IOException("Authentication failed, url=" + url, e);
-    }
-    return conn;
-  }
-  
-  private HttpURLConnection httpConnect(final HttpOpParam.Op op, final Path fspath,
-      final Param<?,?>... parameters) throws IOException {
-    final URL url = toUrl(op, fspath, parameters);
-
-    //connect and get response
-    HttpURLConnection conn = getHttpUrlConnection(url);
-    try {
-      conn.setRequestMethod(op.getType().toString());
-      if (op.getDoOutput()) {
-        conn = twoStepWrite(conn, op);
-        conn.setRequestProperty("Content-Type", "application/octet-stream");
-      }
-      conn.setDoOutput(op.getDoOutput());
-      conn.connect();
-      return conn;
-    } catch (IOException e) {
-      conn.disconnect();
-      throw e;
+    if (ugi.hasKerberosCredentials()) { 
+      conn = new AuthenticatedURL(AUTH).openConnection(url, authToken);
+    } else {
+      conn = (HttpURLConnection)url.openConnection();
     }
-  }
-  
-  /**
-   * Two-step Create/Append:
-   * Step 1) Submit a Http request with neither auto-redirect nor data. 
-   * Step 2) Submit another Http request with the URL from the Location header with data.
-   * 
-   * The reason of having two-step create/append is for preventing clients to
-   * send out the data before the redirect. This issue is addressed by the
-   * "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
-   * Unfortunately, there are software library bugs (e.g. Jetty 6 http server
-   * and Java 6 http client), which do not correctly implement "Expect:
-   * 100-continue". The two-step create/append is a temporary workaround for
-   * the software library bugs.
-   */
-  static HttpURLConnection twoStepWrite(HttpURLConnection conn,
-      final HttpOpParam.Op op) throws IOException {
-    //Step 1) Submit a Http request with neither auto-redirect nor data. 
-    conn.setInstanceFollowRedirects(false);
-    conn.setDoOutput(false);
-    conn.connect();
-    validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn);
-    final String redirect = conn.getHeaderField("Location");
-    conn.disconnect();
-
-    //Step 2) Submit another Http request with the URL from the Location header with data.
-    conn = (HttpURLConnection)new URL(redirect).openConnection();
-    conn.setRequestMethod(op.getType().toString());
     return conn;
   }
 
@@ -438,12 +409,158 @@ public class WebHdfsFileSystem extends FileSystem
    */
   private Map<?, ?> run(final HttpOpParam.Op op, final Path fspath,
       final Param<?,?>... parameters) throws IOException {
-    final HttpURLConnection conn = httpConnect(op, fspath, parameters);
-    try {
-      final Map<?, ?> m = validateResponse(op, conn);
-      return m != null? m: jsonParse(conn, false);
-    } finally {
-      conn.disconnect();
+    return new Runner(op, fspath, parameters).run().json;
+  }
+
+  /**
+   * This class is for initialing a HTTP connection, connecting to server,
+   * obtaining a response, and also handling retry on failures.
+   */
+  class Runner {
+    private final HttpOpParam.Op op;
+    private final URL url;
+    private final boolean redirected;
+
+    private boolean checkRetry;
+    private HttpURLConnection conn = null;
+    private Map<?, ?> json = null;
+
+    Runner(final HttpOpParam.Op op, final URL url, final boolean redirected) {
+      this.op = op;
+      this.url = url;
+      this.redirected = redirected;
+    }
+
+    Runner(final HttpOpParam.Op op, final Path fspath,
+        final Param<?,?>... parameters) throws IOException {
+      this(op, toUrl(op, fspath, parameters), false);
+    }
+
+    Runner(final HttpOpParam.Op op, final HttpURLConnection conn) {
+      this(op, null, false);
+      this.conn = conn;
+    }
+
+    private void init() throws IOException {
+      checkRetry = !redirected;
+      try {
+        conn = getHttpUrlConnection(url);
+      } catch(AuthenticationException ae) {
+        checkRetry = false;
+        throw new IOException("Authentication failed, url=" + url, ae);
+      }
+    }
+    
+    private void connect() throws IOException {
+      connect(op.getDoOutput());
+    }
+
+    private void connect(boolean doOutput) throws IOException {
+      conn.setRequestMethod(op.getType().toString());
+      conn.setDoOutput(doOutput);
+      conn.setInstanceFollowRedirects(false);
+      conn.connect();
+    }
+
+    private void disconnect() {
+      if (conn != null) {
+        conn.disconnect();
+        conn = null;
+      }
+    }
+
+    Runner run() throws IOException {
+      for(int retry = 0; ; retry++) {
+        try {
+          init();
+          if (op.getDoOutput()) {
+            twoStepWrite();
+          } else {
+            getResponse(op != GetOpParam.Op.OPEN);
+          }
+          return this;
+        } catch(IOException ioe) {
+          shouldRetry(ioe, retry);
+        }
+      }
+    }
+
+    private void shouldRetry(final IOException ioe, final int retry
+        ) throws IOException {
+      if (checkRetry) {
+        try {
+          final RetryPolicy.RetryAction a = retryPolicy.shouldRetry(
+              ioe, retry, 0, true);
+          if (a.action == RetryPolicy.RetryAction.RetryDecision.RETRY) {
+            LOG.info("Retrying connect to namenode: " + nnAddr
+                + ". Already tried " + retry + " time(s); retry policy is "
+                + retryPolicy + ", delay " + a.delayMillis + "ms.");      
+            Thread.sleep(a.delayMillis);
+            return;
+          }
+        } catch(Exception e) {
+          LOG.warn("Original exception is ", ioe);
+          throw toIOException(e);
+        }
+      }
+      throw toIOException(ioe);
+    }
+
+    /**
+     * Two-step Create/Append:
+   * Step 1) Submit an HTTP request with neither auto-redirect nor data.
+   * Step 2) Submit another HTTP request to the URL from the Location header, this time with data.
+   * 
+   * The reason for the two-step create/append is to prevent clients from
+   * sending data before the redirect. This issue is addressed by the
+   * "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
+   * Unfortunately, some libraries (e.g. the Jetty 6 HTTP server and the
+   * Java 6 HTTP client) do not correctly implement "Expect: 100-continue".
+   * The two-step create/append is a temporary workaround for those library
+   * bugs.
+     */
+    HttpURLConnection twoStepWrite() throws IOException {
+      //Step 1) Submit an HTTP request with neither auto-redirect nor data.
+      connect(false);
+      validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn, false);
+      final String redirect = conn.getHeaderField("Location");
+      disconnect();
+      checkRetry = false;
+      
+      //Step 2) Submit another HTTP request to the URL from the Location header, this time with data.
+      conn = (HttpURLConnection)new URL(redirect).openConnection();
+      conn.setChunkedStreamingMode(32 << 10); //32kB-chunk
+      connect();
+      return conn;
+    }
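The two-step flow above can be reproduced with plain `HttpURLConnection`. The sketch below is illustrative only: the namenode host, port, and file path are assumptions, and it is not the WebHdfsFileSystem code itself.

```java
// Hedged sketch of a two-step WebHDFS-style PUT: no data on the first
// request, then stream the bytes to the URL returned in the Location header.
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class TwoStepPut {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint; adjust host, port, and path for a real cluster.
    URL create = new URL("http://namenode:50070/webhdfs/v1/tmp/f?op=CREATE");

    // Step 1: send no data and do not auto-follow the 307; just read Location.
    HttpURLConnection first = (HttpURLConnection) create.openConnection();
    first.setRequestMethod("PUT");
    first.setInstanceFollowRedirects(false);
    first.connect();
    String location = first.getHeaderField("Location");
    first.disconnect();

    // Step 2: send the file bytes to the datanode URL from the Location header.
    HttpURLConnection second =
        (HttpURLConnection) new URL(location).openConnection();
    second.setRequestMethod("PUT");
    second.setDoOutput(true);
    second.setChunkedStreamingMode(32 << 10);   // stream without buffering it all
    try (OutputStream out = second.getOutputStream()) {
      out.write("hello".getBytes("UTF-8"));
    }
    System.out.println("HTTP " + second.getResponseCode());
    second.disconnect();
  }
}
```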
+
+    FSDataOutputStream write(final int bufferSize) throws IOException {
+      return WebHdfsFileSystem.this.write(op, conn, bufferSize);
+    }
+
+    void getResponse(boolean getJsonAndDisconnect) throws IOException {
+      try {
+        connect();
+        if (!redirected && op.getRedirect()) {
+          final String redirect = conn.getHeaderField("Location");
+          json = validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op),
+              conn, false);
+          disconnect();
+  
+          checkRetry = false;
+          conn = (HttpURLConnection)new URL(redirect).openConnection();
+          connect();
+        }
+
+        json = validateResponse(op, conn, false);
+        if (json == null && getJsonAndDisconnect) {
+          json = jsonParse(conn, false);
+        }
+      } finally {
+        if (getJsonAndDisconnect) {
+          disconnect();
+        }
+      }
     }
     }
   }
   }
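The Runner above folds connection setup, the optional redirect hop, and retry handling into one place; note that retries are suppressed once a redirect has been followed or after the first step of a two-step write. A minimal, self-contained sketch of the same retry-with-policy loop, using a simplified stand-in for Hadoop's RetryPolicy rather than its real API:

```java
import java.io.IOException;

// Simplified policy: return a delay in milliseconds, or -1 to give up.
interface SimpleRetryPolicy {
  long shouldRetry(IOException cause, int attempt);
}

class RetryingCall {
  interface Call<T> {
    T run() throws IOException;
  }

  static <T> T execute(Call<T> call, SimpleRetryPolicy policy) throws IOException {
    for (int attempt = 0; ; attempt++) {
      try {
        return call.run();                  // e.g. init(), connect(), parse the response
      } catch (IOException ioe) {
        long delayMillis = policy.shouldRetry(ioe, attempt);
        if (delayMillis < 0) {
          throw ioe;                        // the policy says fail now
        }
        try {
          Thread.sleep(delayMillis);        // back off, then loop and retry
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw ioe;
        }
      }
    }
  }

  public static void main(String[] args) throws IOException {
    // Retry up to three times with a flat 100 ms delay.
    SimpleRetryPolicy policy = (cause, attempt) -> attempt < 3 ? 100 : -1;
    System.out.println(execute(() -> "connected", policy));
  }
}
```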
 
 
@@ -577,7 +694,7 @@ public class WebHdfsFileSystem extends FileSystem
           super.close();
           super.close();
         } finally {
         } finally {
           try {
           try {
-            validateResponse(op, conn);
+            validateResponse(op, conn, true);
           } finally {
           } finally {
             conn.disconnect();
             conn.disconnect();
           }
           }
@@ -593,13 +710,14 @@ public class WebHdfsFileSystem extends FileSystem
     statistics.incrementWriteOps(1);
     statistics.incrementWriteOps(1);
 
 
     final HttpOpParam.Op op = PutOpParam.Op.CREATE;
     final HttpOpParam.Op op = PutOpParam.Op.CREATE;
-    final HttpURLConnection conn = httpConnect(op, f, 
+    return new Runner(op, f, 
         new PermissionParam(applyUMask(permission)),
         new PermissionParam(applyUMask(permission)),
         new OverwriteParam(overwrite),
         new OverwriteParam(overwrite),
         new BufferSizeParam(bufferSize),
         new BufferSizeParam(bufferSize),
         new ReplicationParam(replication),
         new ReplicationParam(replication),
-        new BlockSizeParam(blockSize));
-    return write(op, conn, bufferSize);
+        new BlockSizeParam(blockSize))
+      .run()
+      .write(bufferSize);
   }
   }
 
 
   @Override
   @Override
@@ -608,9 +726,9 @@ public class WebHdfsFileSystem extends FileSystem
     statistics.incrementWriteOps(1);
     statistics.incrementWriteOps(1);
 
 
     final HttpOpParam.Op op = PostOpParam.Op.APPEND;
     final HttpOpParam.Op op = PostOpParam.Op.APPEND;
-    final HttpURLConnection conn = httpConnect(op, f, 
-        new BufferSizeParam(bufferSize));
-    return write(op, conn, bufferSize);
+    return new Runner(op, f, new BufferSizeParam(bufferSize))
+      .run()
+      .write(bufferSize);
   }
   }
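With create and append both routed through Runner, a client only touches the FileSystem API; the request assembly and retries happen underneath. A small usage sketch, where the host, port, and path are placeholders rather than values from this patch:

```java
// Hedged usage sketch: writing a file through webhdfs://, which exercises
// the create path shown above.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsWriteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed namenode HTTP address; replace with the real one.
    FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:50070"), conf);
    try (FSDataOutputStream out = fs.create(new Path("/tmp/example.txt"), true)) {
      out.writeBytes("hello over webhdfs\n");
    }
    fs.close();
  }
}
```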
 
 
   @SuppressWarnings("deprecation")
   @SuppressWarnings("deprecation")
@@ -637,26 +755,17 @@ public class WebHdfsFileSystem extends FileSystem
   }
   }
 
 
   class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {
   class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {
-    /** The url with offset parameter */
-    private URL offsetUrl;
-  
     OffsetUrlOpener(final URL url) {
     OffsetUrlOpener(final URL url) {
       super(url);
       super(url);
     }
     }
 
 
-    /** Open connection with offset url. */
+    /** Setup offset url and connect. */
     @Override
     @Override
-    protected HttpURLConnection openConnection() throws IOException {
-      return getHttpUrlConnection(offsetUrl);
-    }
-
-    /** Setup offset url before open connection. */
-    @Override
-    protected HttpURLConnection openConnection(final long offset) throws IOException {
-      offsetUrl = offset == 0L? url: new URL(url + "&" + new OffsetParam(offset));
-      final HttpURLConnection conn = openConnection();
-      conn.setRequestMethod("GET");
-      return conn;
+    protected HttpURLConnection connect(final long offset,
+        final boolean resolved) throws IOException {
+      final URL offsetUrl = offset == 0L? url
+          : new URL(url + "&" + new OffsetParam(offset));
+      return new Runner(GetOpParam.Op.OPEN, offsetUrl, resolved).run().conn;
     }  
     }  
   }
   }
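Seeking on a WebHDFS input stream ends up, in broad strokes, back in the offset-URL logic above: the stream is reopened with op=OPEN plus an offset parameter. A hedged usage sketch, with an assumed host and file:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsSeekRead {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode:50070"), new Configuration());
    byte[] buf = new byte[4096];
    try (FSDataInputStream in = fs.open(new Path("/tmp/example.txt"))) {
      in.seek(1024);                 // continues the read from offset 1024
      int n = in.read(buf);
      System.out.println("read " + n + " bytes");
    }
  }
}
```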
 
 
@@ -697,12 +806,6 @@ public class WebHdfsFileSystem extends FileSystem
     OffsetUrlInputStream(OffsetUrlOpener o, OffsetUrlOpener r) {
     OffsetUrlInputStream(OffsetUrlOpener o, OffsetUrlOpener r) {
       super(o, r);
       super(o, r);
     }
     }
-    
-    @Override
-    protected void checkResponseCode(final HttpURLConnection connection
-        ) throws IOException {
-      validateResponse(GetOpParam.Op.OPEN, connection);
-    }
 
 
     /** Remove offset parameter before returning the resolved url. */
     /** Remove offset parameter before returning the resolved url. */
     @Override
     @Override
@@ -835,8 +938,7 @@ public class WebHdfsFileSystem extends FileSystem
     }
     }
 
 
     private static WebHdfsFileSystem getWebHdfs(
     private static WebHdfsFileSystem getWebHdfs(
-        final Token<?> token, final Configuration conf
-        ) throws IOException, InterruptedException, URISyntaxException {
+        final Token<?> token, final Configuration conf) throws IOException {
       
       
       final InetSocketAddress nnAddr = SecurityUtil.getTokenServiceAddr(token);
       final InetSocketAddress nnAddr = SecurityUtil.getTokenServiceAddr(token);
       final URI uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, nnAddr);
       final URI uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, nnAddr);
@@ -850,12 +952,7 @@ public class WebHdfsFileSystem extends FileSystem
       // update the kerberos credentials, if they are coming from a keytab
       // update the kerberos credentials, if they are coming from a keytab
       ugi.reloginFromKeytab();
       ugi.reloginFromKeytab();
 
 
-      try {
-        WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
-        return webhdfs.renewDelegationToken(token);
-      } catch (URISyntaxException e) {
-        throw new IOException(e);
-      }
+      return getWebHdfs(token, conf).renewDelegationToken(token);
     }
     }
   
   
     @Override
     @Override
@@ -865,12 +962,7 @@ public class WebHdfsFileSystem extends FileSystem
       // update the kerberos credentials, if they are coming from a keytab
       // update the kerberos credentials, if they are coming from a keytab
       ugi.checkTGTAndReloginFromKeytab();
       ugi.checkTGTAndReloginFromKeytab();
 
 
-      try {
-        final WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
-        webhdfs.cancelDelegationToken(token);
-      } catch (URISyntaxException e) {
-        throw new IOException(e);
-      }
+      getWebHdfs(token, conf).cancelDelegationToken(token);
     }
     }
   }
   }
   
   

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java

@@ -43,6 +43,11 @@ public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
       return false;
       return false;
     }
     }
 
 
+    @Override
+    public boolean getRedirect() {
+      return false;
+    }
+
     @Override
     @Override
     public int getExpectedHttpResponseCode() {
     public int getExpectedHttpResponseCode() {
       return expectedHttpResponseCode;
       return expectedHttpResponseCode;

+ 18 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java

@@ -23,25 +23,27 @@ import java.net.HttpURLConnection;
 public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
 public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
   /** Get operations. */
   /** Get operations. */
   public static enum Op implements HttpOpParam.Op {
   public static enum Op implements HttpOpParam.Op {
-    OPEN(HttpURLConnection.HTTP_OK),
+    OPEN(true, HttpURLConnection.HTTP_OK),
 
 
-    GETFILESTATUS(HttpURLConnection.HTTP_OK),
-    LISTSTATUS(HttpURLConnection.HTTP_OK),
-    GETCONTENTSUMMARY(HttpURLConnection.HTTP_OK),
-    GETFILECHECKSUM(HttpURLConnection.HTTP_OK),
+    GETFILESTATUS(false, HttpURLConnection.HTTP_OK),
+    LISTSTATUS(false, HttpURLConnection.HTTP_OK),
+    GETCONTENTSUMMARY(false, HttpURLConnection.HTTP_OK),
+    GETFILECHECKSUM(true, HttpURLConnection.HTTP_OK),
 
 
-    GETHOMEDIRECTORY(HttpURLConnection.HTTP_OK),
-    GETDELEGATIONTOKEN(HttpURLConnection.HTTP_OK),
-    GETDELEGATIONTOKENS(HttpURLConnection.HTTP_OK),
+    GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
+    GETDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK),
+    GETDELEGATIONTOKENS(false, HttpURLConnection.HTTP_OK),
 
 
     /** GET_BLOCK_LOCATIONS is a private unstable op. */
     /** GET_BLOCK_LOCATIONS is a private unstable op. */
-    GET_BLOCK_LOCATIONS(HttpURLConnection.HTTP_OK),
+    GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
 
 
-    NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
+    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 
 
+    final boolean redirect;
     final int expectedHttpResponseCode;
     final int expectedHttpResponseCode;
 
 
-    Op(final int expectedHttpResponseCode) {
+    Op(final boolean redirect, final int expectedHttpResponseCode) {
+      this.redirect = redirect;
       this.expectedHttpResponseCode = expectedHttpResponseCode;
       this.expectedHttpResponseCode = expectedHttpResponseCode;
     }
     }
 
 
@@ -55,6 +57,11 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
       return false;
       return false;
     }
     }
 
 
+    @Override
+    public boolean getRedirect() {
+      return redirect;
+    }
+
     @Override
     @Override
     public int getExpectedHttpResponseCode() {
     public int getExpectedHttpResponseCode() {
       return expectedHttpResponseCode;
       return expectedHttpResponseCode;
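The boolean added to each constant records whether the namenode answers the operation with a 307 redirect to a datanode (OPEN and GETFILECHECKSUM) or serves the response directly. A self-contained miniature of the same idea, using a made-up enum rather than the real GetOpParam.Op:

```java
import java.net.HttpURLConnection;

// Illustrative only: each op carries its redirect flag and expected status.
enum MiniGetOp {
  OPEN(true, HttpURLConnection.HTTP_OK),
  GETFILESTATUS(false, HttpURLConnection.HTTP_OK),
  GETFILECHECKSUM(true, HttpURLConnection.HTTP_OK);

  final boolean redirect;
  final int expectedHttpResponseCode;

  MiniGetOp(boolean redirect, int expectedHttpResponseCode) {
    this.redirect = redirect;
    this.expectedHttpResponseCode = expectedHttpResponseCode;
  }

  public static void main(String[] args) {
    for (MiniGetOp op : values()) {
      System.out.println(op + " redirects? " + op.redirect
          + ", expects HTTP " + op.expectedHttpResponseCode);
    }
  }
}
```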

+ 28 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java

@@ -17,6 +17,10 @@
  */
  */
 package org.apache.hadoop.hdfs.web.resources;
 package org.apache.hadoop.hdfs.web.resources;
 
 
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response;
 
 
 
 
@@ -42,6 +46,9 @@ public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
     /** @return true if the operation will do output. */
     /** @return true if the operation will do output. */
     public boolean getDoOutput();
     public boolean getDoOutput();
 
 
+    /** @return true if the operation will be redirected. */
+    public boolean getRedirect();
+
     /** @return true the expected http response code. */
     /** @return true the expected http response code. */
     public int getExpectedHttpResponseCode();
     public int getExpectedHttpResponseCode();
 
 
@@ -51,15 +58,25 @@ public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
 
 
   /** Expects HTTP response 307 "Temporary Redirect". */
   /** Expects HTTP response 307 "Temporary Redirect". */
   public static class TemporaryRedirectOp implements Op {
   public static class TemporaryRedirectOp implements Op {
-    static final TemporaryRedirectOp CREATE = new TemporaryRedirectOp(PutOpParam.Op.CREATE);
-    static final TemporaryRedirectOp APPEND = new TemporaryRedirectOp(PostOpParam.Op.APPEND);
+    static final TemporaryRedirectOp CREATE = new TemporaryRedirectOp(
+        PutOpParam.Op.CREATE);
+    static final TemporaryRedirectOp APPEND = new TemporaryRedirectOp(
+        PostOpParam.Op.APPEND);
+    static final TemporaryRedirectOp OPEN = new TemporaryRedirectOp(
+        GetOpParam.Op.OPEN);
+    static final TemporaryRedirectOp GETFILECHECKSUM = new TemporaryRedirectOp(
+        GetOpParam.Op.GETFILECHECKSUM);
     
     
+    static final List<TemporaryRedirectOp> values
+        = Collections.unmodifiableList(Arrays.asList(
+            new TemporaryRedirectOp[]{CREATE, APPEND, OPEN, GETFILECHECKSUM}));
+
     /** Get an object for the given op. */
     /** Get an object for the given op. */
     public static TemporaryRedirectOp valueOf(final Op op) {
     public static TemporaryRedirectOp valueOf(final Op op) {
-      if (op == CREATE.op) {
-        return CREATE;
-      } else if (op == APPEND.op) {
-        return APPEND;
+      for(TemporaryRedirectOp t : values) {
+        if (op == t.op) {
+          return t;
+        }
       }
       }
       throw new IllegalArgumentException(op + " not found.");
       throw new IllegalArgumentException(op + " not found.");
     }
     }
@@ -80,6 +97,11 @@ public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
       return op.getDoOutput();
       return op.getDoOutput();
     }
     }
 
 
+    @Override
+    public boolean getRedirect() {
+      return false;
+    }
+
     /** Override the original expected response with "Temporary Redirect". */
     /** Override the original expected response with "Temporary Redirect". */
     @Override
     @Override
     public int getExpectedHttpResponseCode() {
     public int getExpectedHttpResponseCode() {

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java

@@ -43,6 +43,11 @@ public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
       return true;
       return true;
     }
     }
 
 
+    @Override
+    public boolean getRedirect() {
+      return true;
+    }
+
     @Override
     @Override
     public int getExpectedHttpResponseCode() {
     public int getExpectedHttpResponseCode() {
       return expectedHttpResponseCode;
       return expectedHttpResponseCode;

+ 9 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java

@@ -39,11 +39,11 @@ public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
     
     
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 
 
-    final boolean doOutput;
+    final boolean doOutputAndRedirect;
     final int expectedHttpResponseCode;
     final int expectedHttpResponseCode;
 
 
-    Op(final boolean doOutput, final int expectedHttpResponseCode) {
-      this.doOutput = doOutput;
+    Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode) {
+      this.doOutputAndRedirect = doOutputAndRedirect;
       this.expectedHttpResponseCode = expectedHttpResponseCode;
       this.expectedHttpResponseCode = expectedHttpResponseCode;
     }
     }
 
 
@@ -54,7 +54,12 @@ public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
 
 
     @Override
     @Override
     public boolean getDoOutput() {
     public boolean getDoOutput() {
-      return doOutput;
+      return doOutputAndRedirect;
+    }
+
+    @Override
+    public boolean getRedirect() {
+      return doOutputAndRedirect;
     }
     }
 
 
     @Override
     @Override

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt

@@ -69,5 +69,6 @@ IF(FUSE_FOUND)
         ${JAVA_JVM_LIBRARY}
         ${JAVA_JVM_LIBRARY}
         hdfs
         hdfs
         m
         m
+        pthread
     )
     )
 ENDIF(FUSE_FOUND)
 ENDIF(FUSE_FOUND)

+ 506 - 116
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.c

@@ -16,17 +16,38 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
-#include "hdfs.h"
-#include "fuse_dfs.h"
 #include "fuse_connect.h"
 #include "fuse_connect.h"
+#include "fuse_dfs.h"
 #include "fuse_users.h" 
 #include "fuse_users.h" 
+#include "libhdfs/hdfs.h"
+#include "util/tree.h"
 
 
+#include <inttypes.h>
 #include <limits.h>
 #include <limits.h>
+#include <poll.h>
 #include <search.h>
 #include <search.h>
 #include <stdio.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <stdlib.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <utime.h>
+
+#define FUSE_CONN_DEFAULT_TIMER_PERIOD      5
+#define FUSE_CONN_DEFAULT_EXPIRY_PERIOD     (5 * 60)
+#define HADOOP_SECURITY_AUTHENTICATION      "hadoop.security.authentication"
+#define HADOOP_FUSE_CONNECTION_TIMEOUT      "hadoop.fuse.connection.timeout"
+#define HADOOP_FUSE_TIMER_PERIOD            "hadoop.fuse.timer.period"
+
+/** Length of the buffer needed by asctime_r */
+#define TIME_STR_LEN 26
+
+struct hdfsConn;
 
 
-#define HADOOP_SECURITY_AUTHENTICATION "hadoop.security.authentication"
+static int hdfsConnCompare(const struct hdfsConn *a, const struct hdfsConn *b);
+static void hdfsConnExpiry(void);
+static void* hdfsConnExpiryThread(void *v);
+
+RB_HEAD(hdfsConnTree, hdfsConn);
 
 
 enum authConf {
 enum authConf {
     AUTH_CONF_UNKNOWN,
     AUTH_CONF_UNKNOWN,
@@ -34,80 +55,308 @@ enum authConf {
     AUTH_CONF_OTHER,
     AUTH_CONF_OTHER,
 };
 };
 
 
-#define MAX_ELEMENTS (16 * 1024)
-static struct hsearch_data *fsTable = NULL;
-static enum authConf hdfsAuthConf = AUTH_CONF_UNKNOWN;
-static pthread_mutex_t tableMutex = PTHREAD_MUTEX_INITIALIZER;
+struct hdfsConn {
+  RB_ENTRY(hdfsConn) entry;
+  /** How many threads are currently using this hdfsConnection object */
+  int64_t refcnt;
+  /** The username used to make this connection.  Dynamically allocated. */
+  char *usrname;
+  /** Kerberos ticket cache path, or NULL if this is not a kerberized
+   * connection.  Dynamically allocated. */
+  char *kpath;
+  /** mtime of the kpath, if the kpath is non-NULL */
+  time_t kPathMtime;
+  /** nanosecond component of the mtime of the kpath, if the kpath is non-NULL */
+  long kPathMtimeNs;
+  /** The cached libhdfs fs instance */
+  hdfsFS fs;
+  /** Nonzero if this hdfs connection needs to be closed as soon as possible.
+   * If this is true, the connection has been removed from the tree. */
+  int condemned;
+  /** Number of times we should run the expiration timer on this connection
+   * before removing it. */
+  int expirationCount;
+};
+
+RB_GENERATE(hdfsConnTree, hdfsConn, entry, hdfsConnCompare);
+
+/** Current cached libhdfs connections */
+static struct hdfsConnTree gConnTree;
+
+/** The URI used to make our connections.  Dynamically allocated. */
+static char *gUri;
 
 
-/*
- * Allocate a hash table for fs handles. Returns 0 on success,
- * -1 on failure.
+/** The port used to make our connections, or 0. */
+static int gPort;
+
+/** Lock which protects gConnTree and gConnectTimer->active */
+static pthread_mutex_t gConnMutex;
+
+/** Type of authentication configured */
+static enum authConf gHdfsAuthConf;
+
+/** FUSE connection timer expiration period */
+static int32_t gTimerPeriod;
+
+/** FUSE connection expiry period */
+static int32_t gExpiryPeriod;
+
+/** FUSE timer expiration thread */
+static pthread_t gTimerThread;
+
+/** 
+ * Find out what type of authentication the system administrator
+ * has configured.
+ *
+ * @return     the type of authentication, or AUTH_CONF_UNKNOWN on error.
  */
  */
-int allocFsTable(void) {
-  assert(NULL == fsTable);
-  fsTable = calloc(1, sizeof(struct hsearch_data));
-  if (0 == hcreate_r(MAX_ELEMENTS, fsTable)) {
-    ERROR("Unable to initialize connection table");
-    return -1;
+static enum authConf discoverAuthConf(void)
+{
+  int ret;
+  char *val = NULL;
+  enum authConf authConf;
+
+  ret = hdfsConfGetStr(HADOOP_SECURITY_AUTHENTICATION, &val);
+  if (ret)
+    authConf = AUTH_CONF_UNKNOWN;
+  else if (!val)
+    authConf = AUTH_CONF_OTHER;
+  else if (!strcmp(val, "kerberos"))
+    authConf = AUTH_CONF_KERBEROS;
+  else
+    authConf = AUTH_CONF_OTHER;
+  free(val);
+  return authConf;
+}
+
+int fuseConnectInit(const char *nnUri, int port)
+{
+  const char *timerPeriod;
+  int ret;
+
+  gTimerPeriod = FUSE_CONN_DEFAULT_TIMER_PERIOD;
+  ret = hdfsConfGetInt(HADOOP_FUSE_TIMER_PERIOD, &gTimerPeriod);
+  if (ret) {
+    fprintf(stderr, "Unable to determine the configured value for %s.",
+          HADOOP_FUSE_TIMER_PERIOD);
+    return -EINVAL;
+  }
+  if (gTimerPeriod < 1) {
+    fprintf(stderr, "Invalid value %d given for %s.\n",
+          gTimerPeriod, HADOOP_FUSE_TIMER_PERIOD);
+    return -EINVAL;
+  }
+  gExpiryPeriod = FUSE_CONN_DEFAULT_EXPIRY_PERIOD;
+  ret = hdfsConfGetInt(HADOOP_FUSE_CONNECTION_TIMEOUT, &gExpiryPeriod);
+  if (ret) {
+    fprintf(stderr, "Unable to determine the configured value for %s.",
+          HADOOP_FUSE_CONNECTION_TIMEOUT);
+    return -EINVAL;
+  }
+  if (gExpiryPeriod < 1) {
+    fprintf(stderr, "Invalid value %d given for %s.\n",
+          gExpiryPeriod, HADOOP_FUSE_CONNECTION_TIMEOUT);
+    return -EINVAL;
+  }
+  gHdfsAuthConf = discoverAuthConf();
+  if (gHdfsAuthConf == AUTH_CONF_UNKNOWN) {
+    fprintf(stderr, "Unable to determine the configured value for %s.",
+          HADOOP_SECURITY_AUTHENTICATION);
+    return -EINVAL;
   }
   }
+  gPort = port;
+  gUri = strdup(nnUri);
+  if (!gUri) {
+    fprintf(stderr, "fuseConnectInit: OOM allocting nnUri\n");
+    return -ENOMEM;
+  }
+  ret = pthread_mutex_init(&gConnMutex, NULL);
+  if (ret) {
+    free(gUri);
+    fprintf(stderr, "fuseConnectInit: pthread_mutex_init failed with error %d\n",
+            ret);
+    return -ret;
+  }
+  RB_INIT(&gConnTree);
+  ret = pthread_create(&gTimerThread, NULL, hdfsConnExpiryThread, NULL);
+  if (ret) {
+    free(gUri);
+    pthread_mutex_destroy(&gConnMutex);
+    fprintf(stderr, "fuseConnectInit: pthread_create failed with error %d\n",
+            ret);
+    return -ret;
+  }
+  fprintf(stderr, "fuseConnectInit: initialized with timer period %d, "
+          "expiry period %d\n", gTimerPeriod, gExpiryPeriod);
   return 0;
   return 0;
 }
 }
 
 
-/*
- * Find a fs handle for the given key. Returns a fs handle, 
- * or NULL if there is no fs for the given key.
+/**
+ * Compare two libhdfs connections by username
+ *
+ * @param a                The first libhdfs connection
+ * @param b                The second libhdfs connection
+ *
+ * @return                 -1, 0, or 1, depending on whether a < b, a == b, or a > b
+ */
+static int hdfsConnCompare(const struct hdfsConn *a, const struct hdfsConn *b)
+{
+  return strcmp(a->usrname, b->usrname);
+}
+
+/**
+ * Find a libhdfs connection by username
+ *
+ * @param usrname         The username to look up
+ *
+ * @return                The connection, or NULL if none could be found
  */
  */
-static hdfsFS findFs(char *key) {
-  ENTRY entry;
-  ENTRY *entryP = NULL;
-  entry.key = key;
-  if (0 == hsearch_r(entry, FIND, &entryP, fsTable)) {
-    return NULL;
+static struct hdfsConn* hdfsConnFind(const char *usrname)
+{
+  struct hdfsConn exemplar;
+
+  memset(&exemplar, 0, sizeof(exemplar));
+  exemplar.usrname = (char*)usrname;
+  return RB_FIND(hdfsConnTree, &gConnTree, &exemplar);
+}
+
+/**
+ * Free the resource associated with a libhdfs connection.
+ *
+ * You must remove the connection from the tree before calling this function.
+ *
+ * @param conn            The libhdfs connection
+ */
+static void hdfsConnFree(struct hdfsConn *conn)
+{
+  int ret;
+
+  ret = hdfsDisconnect(conn->fs);
+  if (ret) {
+    fprintf(stderr, "hdfsConnFree(username=%s): "
+      "hdfsDisconnect failed with error %d\n",
+      (conn->usrname ? conn->usrname : "(null)"), ret);
   }
   }
-  assert(NULL != entryP->data);
-  return (hdfsFS)entryP->data;
+  free(conn->usrname);
+  free(conn->kpath);
+  free(conn);
 }
 }
 
 
-/*
- * Insert the given fs handle into the table.
- * Returns 0 on success, -1 on failure.
+/**
+ * Convert a time_t to a string.
+ *
+ * @param sec           time in seconds since the epoch
+ * @param buf           (out param) output buffer
+ * @param bufLen        length of output buffer
+ *
+ * @return              0 on success; ENAMETOOLONG if the provided buffer was
+ *                      too short
  */
  */
-static int insertFs(char *key, hdfsFS fs) {
-  ENTRY entry;
-  ENTRY *entryP = NULL;
-  assert(NULL != fs);
-  entry.key = strdup(key);
-  if (entry.key == NULL) {
-    return -1;
-  }
-  entry.data = (void*)fs;
-  if (0 == hsearch_r(entry, ENTER, &entryP, fsTable)) {
-    return -1;
+static int timeToStr(time_t sec, char *buf, size_t bufLen)
+{
+  struct tm tm, *out;
+  size_t l;
+
+  if (bufLen < TIME_STR_LEN) {
+    return -ENAMETOOLONG;
   }
   }
+  out = localtime_r(&sec, &tm);
+  asctime_r(out, buf);
+  // strip trailing newline
+  l = strlen(buf);
+  if (l != 0)
+    buf[l - 1] = '\0';
   return 0;
   return 0;
 }
 }
 
 
 /** 
 /** 
- * Find out what type of authentication the system administrator
- * has configured.
+ * Check an HDFS connection's Kerberos path.
  *
  *
- * @return     the type of authentication, or AUTH_CONF_UNKNOWN on error.
+ * If the mtime of the Kerberos ticket cache file has changed since we first
+ * opened the connection, mark the connection as condemned and remove it from
+ * the hdfs connection tree.
+ *
+ * @param conn      The HDFS connection
  */
  */
-static enum authConf discoverAuthConf(void)
+static int hdfsConnCheckKpath(const struct hdfsConn *conn)
+{
+  int ret;
+  struct stat st;
+  char prevTimeBuf[TIME_STR_LEN], newTimeBuf[TIME_STR_LEN];
+
+  if (stat(conn->kpath, &st) < 0) {
+    ret = errno;
+    if (ret == ENOENT) {
+      fprintf(stderr, "hdfsConnCheckKpath(conn.usrname=%s): the kerberos "
+              "ticket cache file '%s' has disappeared.  Condemning the "
+              "connection.\n", conn->usrname, conn->kpath);
+    } else {
+      fprintf(stderr, "hdfsConnCheckKpath(conn.usrname=%s): stat(%s) "
+              "failed with error code %d.  Pessimistically condemning the "
+              "connection.\n", conn->usrname, conn->kpath, ret);
+    }
+    return -ret;
+  }
+  if ((st.st_mtim.tv_sec != conn->kPathMtime) ||
+      (st.st_mtim.tv_nsec != conn->kPathMtimeNs)) {
+    timeToStr(conn->kPathMtime, prevTimeBuf, sizeof(prevTimeBuf));
+    timeToStr(st.st_mtim.tv_sec, newTimeBuf, sizeof(newTimeBuf));
+    fprintf(stderr, "hdfsConnCheckKpath(conn.usrname=%s): mtime on '%s' "
+            "has changed from '%s' to '%s'.  Condemning the connection "
+            "because our cached Kerberos credentials have probably "
+            "changed.\n", conn->usrname, conn->kpath, prevTimeBuf, newTimeBuf);
+    return -EINTERNAL;
+  }
+  return 0;
+}
+
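The same "has the ticket cache changed since we connected?" test can be written in a few lines of Java. The sketch below only illustrates the mtime comparison, with an assumed cache path; it is not a port of the fuse-dfs code.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.FileTime;

class TicketCacheWatcher {
  private final Path ticketCache;
  private final FileTime mtimeAtConnect;   // remembered when the connection was made

  TicketCacheWatcher(String path) throws IOException {
    this.ticketCache = Paths.get(path);
    this.mtimeAtConnect = Files.getLastModifiedTime(ticketCache);
  }

  /** @return true if the cache file disappeared or its mtime moved. */
  boolean credentialsChanged() {
    try {
      return !Files.getLastModifiedTime(ticketCache).equals(mtimeAtConnect);
    } catch (IOException e) {
      return true;   // missing or unreadable: treat the connection as condemned
    }
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical ticket cache path for UID 1000.
    TicketCacheWatcher w = new TicketCacheWatcher("/tmp/krb5cc_1000");
    System.out.println("changed? " + w.credentialsChanged());
  }
}
```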
+/**
+ * Cache expiration logic.
+ *
+ * This function is called periodically by the cache expiration thread.  For
+ * each FUSE connection not currently in use (refcnt == 0) it will decrement the
+ * expirationCount for that connection.  Once the expirationCount reaches 0 for
+ * a connection, it can be garbage collected.
+ *
+ * We also check to see if the Kerberos credentials have changed.  If so, the
+ * connection is immediately condemned, even if it is currently in use.
+ */
+static void hdfsConnExpiry(void)
 {
 {
-    int ret;
-    char *val = NULL;
-    enum authConf authConf;
+  struct hdfsConn *conn, *tmpConn;
 
 
-    ret = hdfsConfGet(HADOOP_SECURITY_AUTHENTICATION, &val);
-    if (ret)
-        authConf = AUTH_CONF_UNKNOWN;
-    else if (!strcmp(val, "kerberos"))
-        authConf = AUTH_CONF_KERBEROS;
-    else
-        authConf = AUTH_CONF_OTHER;
-    free(val);
-    return authConf;
+  pthread_mutex_lock(&gConnMutex);
+  RB_FOREACH_SAFE(conn, hdfsConnTree, &gConnTree, tmpConn) {
+    if (conn->kpath) {
+      if (hdfsConnCheckKpath(conn)) {
+        conn->condemned = 1;
+        RB_REMOVE(hdfsConnTree, &gConnTree, conn);
+        if (conn->refcnt == 0) {
+          /* If the connection is not in use by any threads, delete it
+           * immediately.  If it is still in use by some threads, the last
+           * thread using it will clean it up later inside hdfsConnRelease. */
+          hdfsConnFree(conn);
+          continue;
+        }
+      }
+    }
+    if (conn->refcnt == 0) {
+      /* If the connection is not currently in use by a thread, check to see if
+       * it ought to be removed because it's too old. */
+      conn->expirationCount--;
+      if (conn->expirationCount <= 0) {
+        if (conn->condemned) {
+          fprintf(stderr, "hdfsConnExpiry: LOGIC ERROR: condemned connection "
+                  "as %s is still in the tree!\n", conn->usrname);
+        }
+        fprintf(stderr, "hdfsConnExpiry: freeing and removing connection as "
+                "%s because it's now too old.\n", conn->usrname);
+        RB_REMOVE(hdfsConnTree, &gConnTree, conn);
+        hdfsConnFree(conn);
+      }
+    }
+  }
+  pthread_mutex_unlock(&gConnMutex);
 }
 }
 
 
 /**
 /**
@@ -129,9 +378,9 @@ static enum authConf discoverAuthConf(void)
  * @param path          (out param) the path to the ticket cache file
  * @param path          (out param) the path to the ticket cache file
  * @param pathLen       length of the path buffer
  * @param pathLen       length of the path buffer
  */
  */
-static void findKerbTicketCachePath(char *path, size_t pathLen)
+static void findKerbTicketCachePath(struct fuse_context *ctx,
+                                    char *path, size_t pathLen)
 {
 {
-  struct fuse_context *ctx = fuse_get_context();
   FILE *fp = NULL;
   FILE *fp = NULL;
   static const char * const KRB5CCNAME = "\0KRB5CCNAME=";
   static const char * const KRB5CCNAME = "\0KRB5CCNAME=";
   int c = '\0', pathIdx = 0, keyIdx = 0;
   int c = '\0', pathIdx = 0, keyIdx = 0;
@@ -168,72 +417,213 @@ done:
   }
   }
 }
 }
 
 
-/*
- * Connect to the NN as the current user/group.
- * Returns a fs handle on success, or NULL on failure.
+/**
+ * Create a new libhdfs connection.
+ *
+ * @param usrname       Username to use for the new connection
+ * @param ctx           FUSE context to use for the new connection
+ * @param out           (out param) the new libhdfs connection
+ *
+ * @return              0 on success; error code otherwise
  */
  */
-hdfsFS doConnectAsUser(const char *nn_uri, int nn_port) {
-  struct hdfsBuilder *bld;
-  uid_t uid = fuse_get_context()->uid;
-  char *user = getUsername(uid);
-  char kpath[PATH_MAX];
+static int fuseNewConnect(const char *usrname, struct fuse_context *ctx,
+        struct hdfsConn **out)
+{
+  struct hdfsBuilder *bld = NULL;
+  char kpath[PATH_MAX] = { 0 };
+  struct hdfsConn *conn = NULL;
   int ret;
   int ret;
-  hdfsFS fs = NULL;
-  if (NULL == user) {
-    goto done;
-  }
+  struct stat st;
 
 
-  ret = pthread_mutex_lock(&tableMutex);
-  assert(0 == ret);
-
-  fs = findFs(user);
-  if (NULL == fs) {
-    if (hdfsAuthConf == AUTH_CONF_UNKNOWN) {
-      hdfsAuthConf = discoverAuthConf();
-      if (hdfsAuthConf == AUTH_CONF_UNKNOWN) {
-        ERROR("Unable to determine the configured value for %s.",
-              HADOOP_SECURITY_AUTHENTICATION);
-        goto done;
-      }
-    }
-    bld = hdfsNewBuilder();
-    if (!bld) {
-      ERROR("Unable to create hdfs builder");
-      goto done;
-    }
-    hdfsBuilderSetForceNewInstance(bld);
-    hdfsBuilderSetNameNode(bld, nn_uri);
-    if (nn_port) {
-        hdfsBuilderSetNameNodePort(bld, nn_port);
-    }
-    hdfsBuilderSetUserName(bld, user);
-    if (hdfsAuthConf == AUTH_CONF_KERBEROS) {
-      findKerbTicketCachePath(kpath, sizeof(kpath));
-      hdfsBuilderSetKerbTicketCachePath(bld, kpath);
+  conn = calloc(1, sizeof(struct hdfsConn));
+  if (!conn) {
+    fprintf(stderr, "fuseNewConnect: OOM allocating struct hdfsConn\n");
+    ret = -ENOMEM;
+    goto error;
+  }
+  bld = hdfsNewBuilder();
+  if (!bld) {
+    fprintf(stderr, "Unable to create hdfs builder\n");
+    ret = -ENOMEM;
+    goto error;
+  }
+  /* We always want to get a new FileSystem instance here-- that's why we call
+   * hdfsBuilderSetForceNewInstance.  Otherwise the 'cache condemnation' logic
+   * in hdfsConnExpiry will not work correctly, since FileSystem might re-use the
+   * existing cached connection which we wanted to get rid of.
+   */
+  hdfsBuilderSetForceNewInstance(bld);
+  hdfsBuilderSetNameNode(bld, gUri);
+  if (gPort) {
+    hdfsBuilderSetNameNodePort(bld, gPort);
+  }
+  hdfsBuilderSetUserName(bld, usrname);
+  if (gHdfsAuthConf == AUTH_CONF_KERBEROS) {
+    findKerbTicketCachePath(ctx, kpath, sizeof(kpath));
+    if (stat(kpath, &st) < 0) {
+      fprintf(stderr, "fuseNewConnect: failed to find Kerberos ticket cache "
+        "file '%s'.  Did you remember to kinit for UID %d?\n",
+        kpath, ctx->uid);
+      ret = -EACCES;
+      goto error;
     }
     }
-    fs = hdfsBuilderConnect(bld);
-    if (NULL == fs) {
-      int err = errno;
-      ERROR("Unable to create fs for user %s: error code %d", user, err);
-      goto done;
+    conn->kPathMtime = st.st_mtim.tv_sec;
+    conn->kPathMtimeNs = st.st_mtim.tv_nsec;
+    hdfsBuilderSetKerbTicketCachePath(bld, kpath);
+    conn->kpath = strdup(kpath);
+    if (!conn->kpath) {
+      fprintf(stderr, "fuseNewConnect: OOM allocating kpath\n");
+      ret = -ENOMEM;
+      goto error;
     }
     }
-    if (-1 == insertFs(user, fs)) {
-      ERROR("Unable to cache fs for user %s", user);
+  }
+  conn->usrname = strdup(usrname);
+  if (!conn->usrname) {
+    fprintf(stderr, "fuseNewConnect: OOM allocating usrname\n");
+    ret = -ENOMEM;
+    goto error;
+  }
+  conn->fs = hdfsBuilderConnect(bld);
+  bld = NULL;
+  if (!conn->fs) {
+    ret = errno;
+    fprintf(stderr, "fuseNewConnect(usrname=%s): Unable to create fs: "
+            "error code %d\n", usrname, ret);
+    goto error;
+  }
+  RB_INSERT(hdfsConnTree, &gConnTree, conn);
+  *out = conn;
+  return 0;
+
+error:
+  if (bld) {
+    hdfsFreeBuilder(bld);
+  }
+  if (conn) {
+    free(conn->kpath);
+    free(conn->usrname);
+    free(conn);
+  }
+  return ret;
+}
+
+int fuseConnect(const char *usrname, struct fuse_context *ctx,
+                struct hdfsConn **out)
+{
+  int ret;
+  struct hdfsConn* conn;
+
+  pthread_mutex_lock(&gConnMutex);
+  conn = hdfsConnFind(usrname);
+  if (!conn) {
+    ret = fuseNewConnect(usrname, ctx, &conn);
+    if (ret) {
+      pthread_mutex_unlock(&gConnMutex);
+      fprintf(stderr, "fuseConnect(usrname=%s): fuseNewConnect failed with "
+              "error code %d\n", usrname, ret);
+      return ret;
     }
     }
   }
   }
+  conn->refcnt++;
+  conn->expirationCount = (gExpiryPeriod + gTimerPeriod - 1) / gTimerPeriod;
+  if (conn->expirationCount < 2)
+    conn->expirationCount = 2;
+  pthread_mutex_unlock(&gConnMutex);
+  *out = conn;
+  return 0;
+}
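The expiration count set above is a ceiling division of the expiry period by the sweep period, clamped to at least 2. A tiny worked example using the default periods defined earlier in this file (5 s timer, 300 s expiry):

```java
public class ExpiryCountExample {
  public static void main(String[] args) {
    int timerPeriod = 5;      // seconds between expiry sweeps (default above)
    int expiryPeriod = 300;   // idle seconds before eviction (default above)
    // Ceiling division: how many sweeps an idle connection survives.
    int expirationCount = (expiryPeriod + timerPeriod - 1) / timerPeriod;
    System.out.println(expirationCount);   // prints 60
  }
}
```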
 
 
-done:
-  ret = pthread_mutex_unlock(&tableMutex);
-  assert(0 == ret);
-  free(user);
-  return fs;
+int fuseConnectAsThreadUid(struct hdfsConn **conn)
+{
+  struct fuse_context *ctx;
+  char *usrname;
+  int ret;
+  
+  ctx = fuse_get_context();
+  usrname = getUsername(ctx->uid);
+  ret = fuseConnect(usrname, ctx, conn);
+  free(usrname);
+  return ret;
 }
 }
 
 
-/*
- * We currently cache a fs handle per-user in this module rather
- * than use the FileSystem cache in the java client. Therefore
- * we do not disconnect the fs handle here.
- */
-int doDisconnect(hdfsFS fs) {
+int fuseConnectTest(void)
+{
+  int ret;
+  struct hdfsConn *conn;
+
+  if (gHdfsAuthConf == AUTH_CONF_KERBEROS) {
+    // TODO: call some method which can tell us whether the FS exists.  In order
+    // to implement this, we have to add a method to FileSystem in order to do
+    // this without valid Kerberos authentication.  See HDFS-3674 for details.
+    return 0;
+  }
+  ret = fuseNewConnect("root", NULL, &conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectTest failed with error code %d\n", ret);
+    return ret;
+  }
+  hdfsConnRelease(conn);
   return 0;
   return 0;
 }
 }
+
+struct hdfs_internal* hdfsConnGetFs(struct hdfsConn *conn)
+{
+  return conn->fs;
+}
+
+void hdfsConnRelease(struct hdfsConn *conn)
+{
+  pthread_mutex_lock(&gConnMutex);
+  conn->refcnt--;
+  if ((conn->refcnt == 0) && (conn->condemned)) {
+    fprintf(stderr, "hdfsConnRelease(usrname=%s): freeing condemend FS!\n",
+      conn->usrname);
+    /* Notice that we're not removing the connection from gConnTree here.
+     * If the connection is condemned, it must have already been removed from
+     * the tree, so that no other threads start using it.
+     */
+    hdfsConnFree(conn);
+  }
+  pthread_mutex_unlock(&gConnMutex);
+}
+
+/**
+ * Get the monotonic time.
+ *
+ * Unlike the wall-clock time, monotonic time only ever goes forward.  If the
+ * user adjusts the time, the monotonic time will not be affected.
+ *
+ * @return        The monotonic time
+ */
+static time_t getMonotonicTime(void)
+{
+  int res;
+  struct timespec ts;
+       
+  res = clock_gettime(CLOCK_MONOTONIC, &ts);
+  if (res)
+    abort();
+  return ts.tv_sec;
+}
+
+/**
+ * FUSE connection expiration thread
+ *
+ */
+static void* hdfsConnExpiryThread(void *v)
+{
+  time_t nextTime, curTime;
+  int waitTime;
+
+  nextTime = getMonotonicTime() + gTimerPeriod;
+  while (1) {
+    curTime = getMonotonicTime();
+    if (curTime >= nextTime) {
+      hdfsConnExpiry();
+      nextTime = curTime + gTimerPeriod;
+    }
+    waitTime = (nextTime - curTime) * 1000;
+    poll(NULL, 0, waitTime);
+  }
+  return NULL;
+}
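Taken together, fuseConnect, hdfsConnRelease, and hdfsConnExpiry implement a per-user, reference-counted connection cache with idle expiry. The Java sketch below compresses that scheme into one class so the bookkeeping is easier to follow; it deliberately omits the Kerberos "condemned" handling and uses invented names, so it is an illustration rather than a translation of this file.

```java
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

class ExpiringConnCache<C> {
  interface Factory<C> { C create(String user); }

  private static final class Entry<C> {
    final C conn;
    int refcnt;            // threads currently using this connection
    int expirationCount;   // sweeps left before an idle entry is evicted
    Entry(C conn) { this.conn = conn; }
  }

  private final Map<String, Entry<C>> cache = new HashMap<>();
  private final Factory<C> factory;
  private final int sweepsUntilExpiry;

  ExpiringConnCache(Factory<C> factory, int expiryPeriod, int timerPeriod) {
    this.factory = factory;
    // Ceiling division, clamped to 2, as in fuseConnect above.
    this.sweepsUntilExpiry = Math.max(2,
        (expiryPeriod + timerPeriod - 1) / timerPeriod);
  }

  /** Look up or create the per-user connection and pin it. */
  synchronized C acquire(String user) {
    Entry<C> e = cache.get(user);
    if (e == null) {
      e = new Entry<>(factory.create(user));
      cache.put(user, e);
    }
    e.refcnt++;
    e.expirationCount = sweepsUntilExpiry;   // reset the idle countdown
    return e.conn;
  }

  /** Unpin a connection obtained from acquire(). */
  synchronized void release(String user) {
    Entry<C> e = cache.get(user);
    if (e != null && e.refcnt > 0) {
      e.refcnt--;
    }
  }

  /** Called by a timer thread every timerPeriod seconds. */
  synchronized void sweep() {
    for (Iterator<Entry<C>> it = cache.values().iterator(); it.hasNext(); ) {
      Entry<C> e = it.next();
      if (e.refcnt == 0 && --e.expirationCount <= 0) {
        it.remove();                         // idle for too long: evict
      }
    }
  }

  public static void main(String[] args) {
    ExpiringConnCache<String> c =
        new ExpiringConnCache<>(user -> "conn-for-" + user, 300, 5);
    String conn = c.acquire("alice");
    System.out.println(conn);
    c.release("alice");
    for (int i = 0; i < 60; i++) {
      c.sweep();                             // after 60 idle sweeps, "alice" is evicted
    }
  }
}
```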

+ 66 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.h

@@ -19,10 +19,72 @@
 #ifndef __FUSE_CONNECT_H__
 #ifndef __FUSE_CONNECT_H__
 #define __FUSE_CONNECT_H__
 #define __FUSE_CONNECT_H__
 
 
-#include "fuse_dfs.h"
+struct fuse_context;
+struct hdfsConn;
+struct hdfs_internal;
 
 
-hdfsFS doConnectAsUser(const char *nn_uri, int nn_port);
-int doDisconnect(hdfsFS fs);
-int allocFsTable(void);
+/**
+ * Initialize the fuse connection subsystem.
+ *
+ * This must be called before any of the other functions in this module.
+ *
+ * @param nnUri      The NameNode URI
+ * @param port       The NameNode port
+ *
+ * @return           0 on success; error code otherwise
+ */
+int fuseConnectInit(const char *nnUri, int port);
+
+/**
+ * Get a libhdfs connection.
+ *
+ * If there is an existing connection, it will be reused.  If not, a new one
+ * will be created.
+ *
+ * You must call hdfsConnRelease on the connection you get back!
+ *
+ * @param usrname    The username to use
+ * @param ctx        The FUSE context to use (contains UID, PID of requestor)
+ * @param conn       (out param) The HDFS connection
+ *
+ * @return           0 on success; error code otherwise
+ */
+int fuseConnect(const char *usrname, struct fuse_context *ctx,
+                struct hdfsConn **out);
+
+/**
+ * Get a libhdfs connection.
+ *
+ * The same as fuseConnect, except the username will be determined from the FUSE
+ * thread context.
+ *
+ * @param conn       (out param) The HDFS connection
+ *
+ * @return           0 on success; error code otherwise
+ */
+int fuseConnectAsThreadUid(struct hdfsConn **conn);
+
+/**
+ * Test whether we can connect to the HDFS cluster
+ *
+ * @return           0 on success; error code otherwise
+ */
+int fuseConnectTest(void);
+
+/**
+ * Get the hdfsFS associated with an hdfsConn.
+ *
+ * @param conn       The hdfsConn
+ *
+ * @return           the hdfsFS
+ */
+struct hdfs_internal* hdfsConnGetFs(struct hdfsConn *conn);
+
+/**
+ * Release an hdfsConn when we're done with it.
+ *
+ * @param conn       The hdfsConn
+ */
+void hdfsConnRelease(struct hdfsConn *conn);
 
 
 #endif
 #endif

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_context_handle.h

@@ -31,8 +31,6 @@
 //
 //
 typedef struct dfs_context_struct {
 typedef struct dfs_context_struct {
   int debug;
   int debug;
-  char *nn_uri;
-  int nn_port;
   int read_only;
   int read_only;
   int usetrash;
   int usetrash;
   int direct_io;
   int direct_io;

+ 23 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_dfs.c

@@ -65,8 +65,19 @@ static struct fuse_operations dfs_oper = {
   .truncate = dfs_truncate,
   .truncate = dfs_truncate,
 };
 };
 
 
+static void print_env_vars(void)
+{
+  const char *cp = getenv("CLASSPATH");
+  const char *ld = getenv("LD_LIBRARY_PATH");
+
+  fprintf(stderr, "LD_LIBRARY_PATH=%s",ld == NULL ? "NULL" : ld);
+  fprintf(stderr, "CLASSPATH=%s",cp == NULL ? "NULL" : cp);
+}
+
 int main(int argc, char *argv[])
 int main(int argc, char *argv[])
 {
 {
+  int ret;
+
   umask(0);
   umask(0);
 
 
   extern const char *program;  
   extern const char *program;  
@@ -106,24 +117,22 @@ int main(int argc, char *argv[])
     exit(0);
     exit(0);
   }
   }
 
 
-  // Check connection as root
+  ret = fuseConnectInit(options.nn_uri, options.nn_port);
+  if (ret) {
+    ERROR("FATAL: dfs_init: fuseConnInit failed with error %d!", ret);
+    print_env_vars();
+    exit(EXIT_FAILURE);
+  }
   if (options.initchecks == 1) {
   if (options.initchecks == 1) {
-    hdfsFS tempFS = hdfsConnectAsUser(options.nn_uri, options.nn_port, "root");
-    if (NULL == tempFS) {
-      const char *cp = getenv("CLASSPATH");
-      const char *ld = getenv("LD_LIBRARY_PATH");
-      ERROR("FATAL: misconfiguration - cannot connect to HDFS");
-      ERROR("LD_LIBRARY_PATH=%s",ld == NULL ? "NULL" : ld);
-      ERROR("CLASSPATH=%s",cp == NULL ? "NULL" : cp);
-      exit(1);
-    }
-    if (doDisconnect(tempFS)) {
-      ERROR("FATAL: unable to disconnect from test filesystem.");
-      exit(1);
+    ret = fuseConnectTest();
+    if (ret) {
+      ERROR("FATAL: dfs_init: fuseConnTest failed with error %d!", ret);
+      print_env_vars();
+      exit(EXIT_FAILURE);
     }
     }
   }
   }
 
 
-  int ret = fuse_main(args.argc, args.argv, &dfs_oper, NULL);
+  ret = fuse_main(args.argc, args.argv, &dfs_oper, NULL);
   fuse_opt_free_args(&args);
   fuse_opt_free_args(&args);
   return ret;
   return ret;
 }
 }

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_file_handle.h

@@ -22,6 +22,8 @@
 #include <hdfs.h>
 #include <hdfs.h>
 #include <pthread.h>
 #include <pthread.h>
 
 
+struct hdfsConn;
+
 /**
 /**
  *
  *
  * dfs_fh_struct is passed around for open files. Fuse provides a hook (the context) 
  * dfs_fh_struct is passed around for open files. Fuse provides a hook (the context) 
@@ -34,10 +36,10 @@
  */
  */
 typedef struct dfs_fh_struct {
 typedef struct dfs_fh_struct {
   hdfsFile hdfsFH;
   hdfsFile hdfsFH;
+  struct hdfsConn *conn;
   char *buf;
   char *buf;
   tSize bufferSize;  //what is the size of the buffer we have
   tSize bufferSize;  //what is the size of the buffer we have
   off_t buffersStartOffset; //where the buffer starts in the file
   off_t buffersStartOffset; //where the buffer starts in the file
-  hdfsFS fs; // for reads/writes need to access as the real user
   pthread_mutex_t mutex;
   pthread_mutex_t mutex;
 } dfs_fh;
 } dfs_fh;
 
 

+ 10 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_chmod.c

@@ -23,6 +23,8 @@
 
 
 int dfs_chmod(const char *path, mode_t mode)
 int dfs_chmod(const char *path, mode_t mode)
 {
 {
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
   TRACE1("chmod", path)
   TRACE1("chmod", path)
   int ret = 0;
   int ret = 0;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
@@ -31,22 +33,24 @@ int dfs_chmod(const char *path, mode_t mode)
   assert(dfs);
   assert(dfs);
   assert('/' == *path);
   assert('/' == *path);
 
 
-  hdfsFS userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect to HDFS");
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
     ret = -EIO;
     ret = -EIO;
     goto cleanup;
     goto cleanup;
   }
   }
+  fs = hdfsConnGetFs(conn);
 
 
-  if (hdfsChmod(userFS, path, (short)mode)) {
+  if (hdfsChmod(fs, path, (short)mode)) {
     ERROR("Could not chmod %s to %d", path, (int)mode);
     ERROR("Could not chmod %s to %d", path, (int)mode);
     ret = (errno > 0) ? -errno : -EIO;
     ret = (errno > 0) ? -errno : -EIO;
     goto cleanup;
     goto cleanup;
   }
   }
 
 
 cleanup:
 cleanup:
-  if (doDisconnect(userFS)) {
-    ret = -EIO;
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   }
 
 
   return ret;
   return ret;

+ 12 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_chown.c

@@ -25,12 +25,12 @@
 
 
 int dfs_chown(const char *path, uid_t uid, gid_t gid)
 int dfs_chown(const char *path, uid_t uid, gid_t gid)
 {
 {
-  TRACE1("chown", path)
-
+  struct hdfsConn *conn = NULL;
   int ret = 0;
   int ret = 0;
   char *user = NULL;
   char *user = NULL;
   char *group = NULL;
   char *group = NULL;
-  hdfsFS userFS = NULL;
+
+  TRACE1("chown", path)
 
 
   // retrieve dfs specific data
   // retrieve dfs specific data
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
@@ -61,14 +61,15 @@ int dfs_chown(const char *path, uid_t uid, gid_t gid)
     }
     }
   }
   }
 
 
-  userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect to HDFS");
+  ret = fuseConnect(user, fuse_get_context(), &conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnect: failed to open a libhdfs connection!  "
+            "error %d.\n", ret);
     ret = -EIO;
     ret = -EIO;
     goto cleanup;
     goto cleanup;
   }
   }
 
 
-  if (hdfsChown(userFS, path, user, group)) {
+  if (hdfsChown(hdfsConnGetFs(conn), path, user, group)) {
     ret = errno;
     ret = errno;
     ERROR("Could not chown %s to %d:%d: error %d", path, (int)uid, gid, ret);
     ERROR("Could not chown %s to %d:%d: error %d", path, (int)uid, gid, ret);
     ret = (ret > 0) ? -ret : -EIO;
     ret = (ret > 0) ? -ret : -EIO;
@@ -76,16 +77,11 @@ int dfs_chown(const char *path, uid_t uid, gid_t gid)
   }
   }
 
 
 cleanup:
 cleanup:
-  if (userFS && doDisconnect(userFS)) {
-    ret = -EIO;
-  }
-  if (user) {
-    free(user);
-  }
-  if (group) {
-    free(group);
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   }
+  free(user);
+  free(group);
 
 
   return ret;
   return ret;
-
 }
 }

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_flush.c

@@ -16,6 +16,7 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
+#include "fuse_connect.h"
 #include "fuse_dfs.h"
 #include "fuse_dfs.h"
 #include "fuse_impls.h"
 #include "fuse_impls.h"
 #include "fuse_file_handle.h"
 #include "fuse_file_handle.h"
@@ -43,9 +44,7 @@ int dfs_flush(const char *path, struct fuse_file_info *fi) {
     assert(fh);
     assert(fh);
     hdfsFile file_handle = (hdfsFile)fh->hdfsFH;
     hdfsFile file_handle = (hdfsFile)fh->hdfsFH;
     assert(file_handle);
     assert(file_handle);
-
-    assert(fh->fs);
-    if (hdfsFlush(fh->fs, file_handle) != 0) {
+    if (hdfsFlush(hdfsConnGetFs(fh->conn), file_handle) != 0) {
       ERROR("Could not flush %lx for %s\n",(long)file_handle, path);
       ERROR("Could not flush %lx for %s\n",(long)file_handle, path);
       return -EIO;
       return -EIO;
     }
     }

+ 16 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_getattr.c

@@ -23,22 +23,27 @@
 
 
 int dfs_getattr(const char *path, struct stat *st)
 int dfs_getattr(const char *path, struct stat *st)
 {
 {
-  TRACE1("getattr", path)
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
+  int ret;
+  hdfsFileInfo *info;
 
 
+  TRACE1("getattr", path)
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
-
   assert(dfs);
   assert(dfs);
   assert(path);
   assert(path);
   assert(st);
   assert(st);
 
 
-  hdfsFS fs = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (NULL == fs) {
-    ERROR("Could not connect to %s:%d", dfs->nn_uri, dfs->nn_port);
-    return -EIO;
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto cleanup;
   }
   }
-
-  int ret = 0;
-  hdfsFileInfo *info = hdfsGetPathInfo(fs,path);
+  fs = hdfsConnGetFs(conn);
+  
+  info = hdfsGetPathInfo(fs,path);
   if (NULL == info) {
   if (NULL == info) {
     ret = -ENOENT;
     ret = -ENOENT;
     goto cleanup;
     goto cleanup;
@@ -63,9 +68,8 @@ int dfs_getattr(const char *path, struct stat *st)
   hdfsFreeFileInfo(info,1);
   hdfsFreeFileInfo(info,1);
 
 
 cleanup:
 cleanup:
-  if (doDisconnect(fs)) {
-    ERROR("Could not disconnect from filesystem");
-    ret = -EIO;
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   }
   return ret;
   return ret;
 }
 }

+ 17 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_mkdir.c

@@ -23,9 +23,12 @@
 
 
 int dfs_mkdir(const char *path, mode_t mode)
 int dfs_mkdir(const char *path, mode_t mode)
 {
 {
-  TRACE1("mkdir", path)
-
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
+  int ret;
+
+  TRACE1("mkdir", path)
 
 
   assert(path);
   assert(path);
   assert(dfs);
   assert(dfs);
@@ -41,29 +44,32 @@ int dfs_mkdir(const char *path, mode_t mode)
     return -EACCES;
     return -EACCES;
   }
   }
   
   
-  hdfsFS userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect");
-    return -EIO;
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto cleanup;
   }
   }
+  fs = hdfsConnGetFs(conn);
 
 
   // In theory the create and chmod should be atomic.
   // In theory the create and chmod should be atomic.
 
 
-  int ret = 0;
-  if (hdfsCreateDirectory(userFS, path)) {
+  if (hdfsCreateDirectory(fs, path)) {
     ERROR("HDFS could not create directory %s", path);
     ERROR("HDFS could not create directory %s", path);
     ret = (errno > 0) ? -errno : -EIO;
     ret = (errno > 0) ? -errno : -EIO;
     goto cleanup;
     goto cleanup;
   }
   }
 
 
-  if (hdfsChmod(userFS, path, (short)mode)) {
+  if (hdfsChmod(fs, path, (short)mode)) {
     ERROR("Could not chmod %s to %d", path, (int)mode);
     ERROR("Could not chmod %s to %d", path, (int)mode);
     ret = (errno > 0) ? -errno : -EIO;
     ret = (errno > 0) ? -errno : -EIO;
   }
   }
+  ret = 0;
 
 
 cleanup:
 cleanup:
-  if (doDisconnect(userFS)) {
-    ret = -EIO;
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   }
   return ret;
   return ret;
 }
 }

+ 48 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c

@@ -21,38 +21,45 @@
 #include "fuse_connect.h"
 #include "fuse_connect.h"
 #include "fuse_file_handle.h"
 #include "fuse_file_handle.h"
 
 
+#include <stdio.h>
+#include <stdlib.h>
+
 int dfs_open(const char *path, struct fuse_file_info *fi)
 int dfs_open(const char *path, struct fuse_file_info *fi)
 {
 {
-  TRACE1("open", path)
-
+  hdfsFS fs = NULL;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
+  dfs_fh *fh = NULL;
+  int mutexInit = 0, ret;
+
+  TRACE1("open", path)
 
 
   // check params and the context var
   // check params and the context var
   assert(path);
   assert(path);
   assert('/' == *path);
   assert('/' == *path);
   assert(dfs);
   assert(dfs);
 
 
-  int ret = 0;
-
   // 0x8000 is always passed in and hadoop doesn't like it, so killing it here
   // 0x8000 is always passed in and hadoop doesn't like it, so killing it here
   // bugbug figure out what this flag is and report problem to Hadoop JIRA
   // bugbug figure out what this flag is and report problem to Hadoop JIRA
   int flags = (fi->flags & 0x7FFF);
   int flags = (fi->flags & 0x7FFF);
 
 
   // retrieve dfs specific data
   // retrieve dfs specific data
-  dfs_fh *fh = (dfs_fh*)calloc(1, sizeof (dfs_fh));
-  if (fh == NULL) {
+  fh = (dfs_fh*)calloc(1, sizeof (dfs_fh));
+  if (!fh) {
     ERROR("Malloc of new file handle failed");
     ERROR("Malloc of new file handle failed");
-    return -EIO;
+    ret = -EIO;
+    goto error;
   }
   }
-
-  fh->fs = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (fh->fs == NULL) {
-    ERROR("Could not connect to dfs");
-    return -EIO;
+  ret = fuseConnectAsThreadUid(&fh->conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto error;
   }
   }
+  fs = hdfsConnGetFs(fh->conn);
 
 
   if (flags & O_RDWR) {
   if (flags & O_RDWR) {
-    hdfsFileInfo *info = hdfsGetPathInfo(fh->fs,path);
+    hdfsFileInfo *info = hdfsGetPathInfo(fs, path);
     if (info == NULL) {
     if (info == NULL) {
       // File does not exist (maybe?); interpret it as a O_WRONLY
       // File does not exist (maybe?); interpret it as a O_WRONLY
       // If the actual error was something else, we'll get it again when
       // If the actual error was something else, we'll get it again when
@@ -66,15 +73,23 @@ int dfs_open(const char *path, struct fuse_file_info *fi)
     }
     }
   }
   }
 
 
-  if ((fh->hdfsFH = hdfsOpenFile(fh->fs, path, flags,  0, 0, 0)) == NULL) {
+  if ((fh->hdfsFH = hdfsOpenFile(fs, path, flags,  0, 0, 0)) == NULL) {
     ERROR("Could not open file %s (errno=%d)", path, errno);
     ERROR("Could not open file %s (errno=%d)", path, errno);
     if (errno == 0 || errno == EINTERNAL) {
     if (errno == 0 || errno == EINTERNAL) {
-      return -EIO;
+      ret = -EIO;
+      goto error;
     }
     }
-    return -errno;
+    ret = -errno;
+    goto error;
   }
   }
 
 
-  pthread_mutex_init(&fh->mutex, NULL);
+  ret = pthread_mutex_init(&fh->mutex, NULL);
+  if (ret) {
+    fprintf(stderr, "dfs_open: error initializing mutex: error %d\n", ret); 
+    ret = -EIO;
+    goto error;
+  }
+  mutexInit = 1;
 
 
   if (fi->flags & O_WRONLY || fi->flags & O_CREAT) {
   if (fi->flags & O_WRONLY || fi->flags & O_CREAT) {
     fh->buf = NULL;
     fh->buf = NULL;
@@ -84,11 +99,27 @@ int dfs_open(const char *path, struct fuse_file_info *fi)
     if (NULL == fh->buf) {
     if (NULL == fh->buf) {
       ERROR("Could not allocate memory for a read for file %s\n", path);
       ERROR("Could not allocate memory for a read for file %s\n", path);
       ret = -EIO;
       ret = -EIO;
+      goto error;
     }
     }
     fh->buffersStartOffset = 0;
     fh->buffersStartOffset = 0;
     fh->bufferSize = 0;
     fh->bufferSize = 0;
   }
   }
   fi->fh = (uint64_t)fh;
   fi->fh = (uint64_t)fh;
+  return 0;
 
 
+error:
+  if (fh) {
+    if (mutexInit) {
+      pthread_mutex_destroy(&fh->mutex);
+    }
+    free(fh->buf);
+    if (fh->hdfsFH) {
+      hdfsCloseFile(fs, fh->hdfsFH);
+    }
+    if (fh->conn) {
+      hdfsConnRelease(fh->conn);
+    }
+    free(fh);
+  }
   return ret;
   return ret;
 }
 }

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_read.c

@@ -16,9 +16,10 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
+#include "fuse_connect.h"
 #include "fuse_dfs.h"
 #include "fuse_dfs.h"
-#include "fuse_impls.h"
 #include "fuse_file_handle.h"
 #include "fuse_file_handle.h"
+#include "fuse_impls.h"
 
 
 static size_t min(const size_t x, const size_t y) {
 static size_t min(const size_t x, const size_t y) {
   return x < y ? x : y;
   return x < y ? x : y;
@@ -48,9 +49,9 @@ int dfs_read(const char *path, char *buf, size_t size, off_t offset,
   assert(fi);
 
   dfs_fh *fh = (dfs_fh*)fi->fh;
+  hdfsFS fs = hdfsConnGetFs(fh->conn);
 
   assert(fh != NULL);
-  assert(fh->fs != NULL);
   assert(fh->hdfsFH != NULL);
 
   // special case this as simplifies the rest of the logic to know the caller wanted > 0 bytes
@@ -61,7 +62,7 @@ int dfs_read(const char *path, char *buf, size_t size, off_t offset,
   if ( size >= dfs->rdbuffer_size) {
     int num_read;
     size_t total_read = 0;
-    while (size - total_read > 0 && (num_read = hdfsPread(fh->fs, fh->hdfsFH, offset + total_read, buf + total_read, size - total_read)) > 0) {
+    while (size - total_read > 0 && (num_read = hdfsPread(fs, fh->hdfsFH, offset + total_read, buf + total_read, size - total_read)) > 0) {
       total_read += num_read;
     }
     // if there was an error before satisfying the current read, this logic declares it an error
@@ -98,7 +99,7 @@ int dfs_read(const char *path, char *buf, size_t size, off_t offset,
       size_t total_read = 0;
 
       while (dfs->rdbuffer_size  - total_read > 0 &&
-             (num_read = hdfsPread(fh->fs, fh->hdfsFH, offset + total_read, fh->buf + total_read, dfs->rdbuffer_size - total_read)) > 0) {
+             (num_read = hdfsPread(fs, fh->hdfsFH, offset + total_read, fh->buf + total_read, dfs->rdbuffer_size - total_read)) > 0) {
         total_read += num_read;
       }
 

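Not part of the diff above: the dfs_read hunks only swap fh->fs for the hdfsFS obtained from the cached connection; the loop itself is unchanged, calling hdfsPread repeatedly until the request is satisfied or it returns a non-positive count. A self-contained sketch of that idiom, assuming the libhdfs hdfs.h header and the hdfsPread signature visible in the hunks; pread_fully is a hypothetical helper name.

/* Sketch of the "loop hdfsPread until satisfied" idiom from dfs_read. */
#include <hdfs.h>        /* libhdfs: hdfsFS, hdfsFile, hdfsPread, tSize */
#include <stddef.h>
#include <sys/types.h>

static ssize_t pread_fully(hdfsFS fs, hdfsFile file, off_t offset,
                           char *buf, size_t size)
{
  size_t total_read = 0;
  tSize num_read = 0;

  while (size - total_read > 0 &&
         (num_read = hdfsPread(fs, file, offset + total_read,
                               buf + total_read, size - total_read)) > 0) {
    total_read += num_read;
  }
  /* an error before the request was satisfied makes the whole read an error */
  if (total_read < size && num_read < 0)
    return -1;
  return (ssize_t)total_read;
}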
+ 16 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_readdir.c

@@ -24,25 +24,31 @@
 int dfs_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
                        off_t offset, struct fuse_file_info *fi)
 {
-  TRACE1("readdir", path)
+  int ret;
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
 
+  TRACE1("readdir", path)
+
   assert(dfs);
   assert(path);
   assert(buf);
 
-  hdfsFS userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect");
-    return -EIO;
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto cleanup;
   }
+  fs = hdfsConnGetFs(conn);
 
   // Read dirents. Calling a variant that just returns the final path
   // component (HDFS-975) would save us from parsing it out below.
   int numEntries = 0;
-  hdfsFileInfo *info = hdfsListDirectory(userFS, path, &numEntries);
+  hdfsFileInfo *info = hdfsListDirectory(fs, path, &numEntries);
 
-  int ret = 0;
   // NULL means either the directory doesn't exist or maybe IO error.
   if (NULL == info) {
     ret = (errno > 0) ? -errno : -ENOENT;
@@ -106,11 +112,11 @@ int dfs_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
     }
   // free the info pointers
   hdfsFreeFileInfo(info,numEntries);
+  ret = 0;
 
 cleanup:
-  if (doDisconnect(userFS)) {
-    ret = -EIO;
-    ERROR("Failed to disconnect %d", errno);
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   return ret;
 }

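Not part of the diff above: dfs_readdir (and the other per-call operations in this commit) now follow one shape — acquire a per-user connection with fuseConnectAsThreadUid, get the hdfsFS out of it with hdfsConnGetFs, and release the connection in a single cleanup block instead of calling doDisconnect. A sketch of that skeleton, assuming the fuse_connect.h API exactly as it appears in this diff; with_connection_example is hypothetical, and hdfsExists merely stands in for whatever hdfs* call the real callback makes.

/* Skeleton of the connect/use/release pattern shared by the refactored callbacks. */
#include "fuse_connect.h"   /* fuseConnectAsThreadUid, hdfsConnGetFs, hdfsConnRelease */
#include <hdfs.h>           /* libhdfs: hdfsFS, hdfsExists */
#include <errno.h>
#include <stdio.h>

static int with_connection_example(const char *path)
{
  struct hdfsConn *conn = NULL;
  hdfsFS fs;
  int ret;

  ret = fuseConnectAsThreadUid(&conn);   /* 0 on success */
  if (ret) {
    fprintf(stderr, "failed to open a libhdfs connection: error %d\n", ret);
    ret = -EIO;
    goto cleanup;
  }
  fs = hdfsConnGetFs(conn);
  if (hdfsExists(fs, path) != 0) {       /* placeholder for the real hdfs* work */
    ret = (errno > 0) ? -errno : -ENOENT;
    goto cleanup;
  }
  ret = 0;

cleanup:
  if (conn) {
    hdfsConnRelease(conn);               /* drop this call's connection reference */
  }
  return ret;
}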
+ 2 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_release.c

@@ -52,15 +52,13 @@ int dfs_release (const char *path, struct fuse_file_info *fi) {
   assert(fh);
   hdfsFile file_handle = (hdfsFile)fh->hdfsFH;
   if (NULL != file_handle) {
-    if (hdfsCloseFile(fh->fs, file_handle) != 0) {
+    if (hdfsCloseFile(hdfsConnGetFs(fh->conn), file_handle) != 0) {
       ERROR("Could not close handle %ld for %s\n",(long)file_handle, path);
       ERROR("Could not close handle %ld for %s\n",(long)file_handle, path);
       ret = -EIO;
       ret = -EIO;
     }
     }
   }
   }
   free(fh->buf);
   free(fh->buf);
-  if (doDisconnect(fh->fs)) {
-    ret = -EIO;
-  }
+  hdfsConnRelease(fh->conn);
   pthread_mutex_destroy(&fh->mutex);
   free(fh);
   fi->fh = 0;

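Not part of the diff above: in dfs_release the per-handle fh->fs is gone, so the file is closed against the filesystem obtained from the cached connection and the connection reference is released instead of a full doDisconnect. A teardown-order sketch; the field names follow the diff, but the struct definition and example_release are illustrative.

/* Teardown order sketch for a dfs_fh-like handle. */
#include "fuse_connect.h"
#include <hdfs.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct example_fh {
  struct hdfsConn *conn;
  hdfsFile hdfsFH;
  char *buf;
  pthread_mutex_t mutex;
};

static int example_release(struct example_fh *fh)
{
  int ret = 0;

  if (fh->hdfsFH) {
    /* close against the fs owned by the cached connection */
    if (hdfsCloseFile(hdfsConnGetFs(fh->conn), fh->hdfsFH) != 0) {
      ret = -EIO;
    }
  }
  free(fh->buf);              /* buffer may be NULL for write-only handles */
  hdfsConnRelease(fh->conn);  /* drop the reference; not a full disconnect */
  pthread_mutex_destroy(&fh->mutex);
  free(fh);
  return ret;
}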
+ 16 - 13
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rename.c

@@ -23,10 +23,12 @@
 
 int dfs_rename(const char *from, const char *to)
 {
-  TRACE1("rename", from) 
-
- // retrieve dfs specific data
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
+  int ret;
+
+  TRACE1("rename", from)
 
   // check params and the context var
   assert(from);
@@ -46,23 +48,24 @@ int dfs_rename(const char *from, const char *to)
     return -EACCES;
   }
 
-  hdfsFS userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect");
-    return -EIO;
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto cleanup;
   }
-
-  int ret = 0;
-  if (hdfsRename(userFS, from, to)) {
+  fs = hdfsConnGetFs(conn);
+  if (hdfsRename(fs, from, to)) {
     ERROR("Rename %s to %s failed", from, to);
     ERROR("Rename %s to %s failed", from, to);
     ret = (errno > 0) ? -errno : -EIO;
     ret = (errno > 0) ? -errno : -EIO;
     goto cleanup;
     goto cleanup;
   }
   }
+  ret = 0;
 
 cleanup:
-  if (doDisconnect(userFS)) {
-    ret = -EIO;
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   return ret;
-
 }

+ 26 - 20
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rmdir.c

@@ -25,9 +25,14 @@ extern const char *const TrashPrefixDir;
 
 int dfs_rmdir(const char *path)
 {
-  TRACE1("rmdir", path)
-
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
+  int ret;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
+  int numEntries = 0;
+  hdfsFileInfo *info = NULL;
+
+  TRACE1("rmdir", path)
 
 
   assert(path);
   assert(path);
   assert(dfs);
   assert(dfs);
@@ -35,42 +40,43 @@ int dfs_rmdir(const char *path)
 
   if (is_protected(path)) {
     ERROR("Trying to delete protected directory %s", path);
-    return -EACCES;
+    ret = -EACCES;
+    goto cleanup;
   }
 
   if (dfs->read_only) {
     ERROR("HDFS configured read-only, cannot delete directory %s", path);
-    return -EACCES;
-  }
-
-  hdfsFS userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect");
-    return -EIO;
+    ret = -EACCES;
+    goto cleanup;
   }
 
-  int ret = 0;
-  int numEntries = 0;
-  hdfsFileInfo *info = hdfsListDirectory(userFS,path,&numEntries);
-
-  if (info) {
-    hdfsFreeFileInfo(info, numEntries);
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto cleanup;
   }
-
+  fs = hdfsConnGetFs(conn);
+  info = hdfsListDirectory(fs, path, &numEntries);
   if (numEntries) {
     ret = -ENOTEMPTY;
     goto cleanup;
   }
 
-  if (hdfsDeleteWithTrash(userFS, path, dfs->usetrash)) {
+  if (hdfsDeleteWithTrash(fs, path, dfs->usetrash)) {
     ERROR("Error trying to delete directory %s", path);
     ERROR("Error trying to delete directory %s", path);
     ret = -EIO;
     ret = -EIO;
     goto cleanup;
     goto cleanup;
   }
   }
+  ret = 0;
 
 cleanup:
-  if (doDisconnect(userFS)) {
-    ret = -EIO;
+  if (info) {
+    hdfsFreeFileInfo(info, numEntries);
+  }
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   }
   return ret;
 }

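Not part of the diff above: dfs_rmdir now defers both hdfsFreeFileInfo and hdfsConnRelease to the cleanup label, so the emptiness check and the delete can bail out with a plain goto while still freeing the listing exactly once. A sketch of just the emptiness check, using only the libhdfs calls visible in this hunk; check_dir_empty is a hypothetical helper.

/* Sketch: refuse to remove a non-empty directory, freeing the listing in cleanup. */
#include <hdfs.h>     /* libhdfs: hdfsFS, hdfsFileInfo, hdfsListDirectory, hdfsFreeFileInfo */
#include <errno.h>
#include <stddef.h>

static int check_dir_empty(hdfsFS fs, const char *path)
{
  int numEntries = 0;
  hdfsFileInfo *info = NULL;
  int ret;

  info = hdfsListDirectory(fs, path, &numEntries);
  if (numEntries) {
    ret = -ENOTEMPTY;
    goto cleanup;
  }
  ret = 0;

cleanup:
  if (info) {
    hdfsFreeFileInfo(info, numEntries);   /* freed exactly once, on every path */
  }
  return ret;
}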
Some files were not shown because too many files changed in this diff.