
Merge trunk into HA branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1166495 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 13 years ago
commit d10631f728
100 changed files with 1599 additions and 438 deletions
  1. 10 10
      dev-support/test-patch.sh
  2. 18 6
      hadoop-common-project/hadoop-auth-examples/pom.xml
  3. 1 1
      hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/RequestLoggerFilter.java
  4. 2 2
      hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java
  5. 1 1
      hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoServlet.java
  6. 1 1
      hadoop-common-project/hadoop-auth-examples/src/main/resources/log4j.properties
  7. 5 5
      hadoop-common-project/hadoop-auth-examples/src/main/webapp/WEB-INF/web.xml
  8. 1 1
      hadoop-common-project/hadoop-auth-examples/src/main/webapp/annonymous/index.html
  9. 1 1
      hadoop-common-project/hadoop-auth-examples/src/main/webapp/index.html
  10. 1 1
      hadoop-common-project/hadoop-auth-examples/src/main/webapp/kerberos/index.html
  11. 1 1
      hadoop-common-project/hadoop-auth-examples/src/main/webapp/simple/index.html
  12. 6 6
      hadoop-common-project/hadoop-auth/BUILDING.txt
  13. 3 3
      hadoop-common-project/hadoop-auth/README.txt
  14. 3 4
      hadoop-common-project/hadoop-auth/pom.xml
  15. 3 3
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
  16. 1 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticationException.java
  17. 1 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java
  18. 4 4
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
  19. 1 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java
  20. 11 9
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
  21. 2 2
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
  22. 2 2
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
  23. 4 4
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
  24. 3 3
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
  25. 1 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
  26. 1 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
  27. 1 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerException.java
  28. 7 7
      hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm
  29. 13 13
      hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
  30. 13 13
      hadoop-common-project/hadoop-auth/src/site/apt/Examples.apt.vm
  31. 7 7
      hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm
  32. 1 1
      hadoop-common-project/hadoop-auth/src/site/site.xml
  33. 2 2
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
  34. 3 2
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
  35. 1 1
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java
  36. 5 5
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
  37. 3 3
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java
  38. 4 4
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
  39. 2 2
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java
  40. 4 4
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
  41. 3 3
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java
  42. 2 2
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java
  43. 1 1
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java
  44. 30 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  45. 23 1
      hadoop-common-project/hadoop-common/pom.xml
  46. 10 0
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
  47. 22 0
      hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
  48. 2 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
  49. 123 26
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
  50. 13 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
  51. 20 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  52. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  53. 38 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolInfo.java
  54. 30 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
  55. 7 14
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  56. 0 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/VersionedProtocol.java
  57. 312 37
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
  58. 6 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
  59. 69 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java
  60. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
  61. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java
  62. 1 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
  63. 5 0
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  64. 5 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
  65. 3 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
  66. 26 17
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
  67. 19 20
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFileSystem.java
  68. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
  69. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java
  70. 30 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
  71. 54 34
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
  72. 12 21
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
  73. 25 7
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
  74. 64 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java
  75. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
  76. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
  77. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java
  78. 255 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
  79. 31 8
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java
  80. 43 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
  81. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
  82. 1 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
  83. 1 0
      hadoop-common-project/pom.xml
  84. 34 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  85. 51 1
      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  86. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
  87. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
  88. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
  89. 16 16
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  90. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  91. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  92. 6 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  93. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
  94. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
  95. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
  96. 10 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  97. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
  98. 6 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  99. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
  100. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java

+ 10 - 10
dev-support/test-patch.sh

@@ -249,8 +249,8 @@ setup () {
  echo "======================================================================"
  echo ""
  echo ""
-  echo "$MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1"
-  $MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1
+  echo "$MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1"
+  $MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1
  if [[ $? != 0 ]] ; then
    echo "Trunk compilation is broken?"
    cleanupAndExit 1
@@ -366,14 +366,14 @@ checkJavadocWarnings () {
  echo "======================================================================"
  echo ""
  echo ""
-  echo "$MVN clean compile javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1"
+  echo "$MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1"
  if [ -d hadoop-project ]; then
    (cd hadoop-project; $MVN install)
  fi
  if [ -d hadoop-common-project/hadoop-annotations ]; then
    (cd hadoop-common-project/hadoop-annotations; $MVN install)
  fi
-  $MVN clean compile javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1
+  $MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1
  javadocWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavadocWarnings.txt | $AWK '/Javadoc Warnings/,EOF' | $GREP warning | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
  echo ""
  echo ""
@@ -404,8 +404,8 @@ checkJavacWarnings () {
  echo "======================================================================"
  echo ""
  echo ""
-  echo "$MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1"
-  $MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1
+  echo "$MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1"
+  $MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1
  if [[ $? != 0 ]] ; then
    JIRA_COMMENT="$JIRA_COMMENT

@@ -488,8 +488,8 @@ checkStyle () {
  echo "THIS IS NOT IMPLEMENTED YET"
  echo ""
  echo ""
-  echo "$MVN compile checkstyle:checkstyle -D${PROJECT_NAME}PatchProcess"
-  $MVN compile checkstyle:checkstyle -D${PROJECT_NAME}PatchProcess
+  echo "$MVN test checkstyle:checkstyle -DskipTests -D${PROJECT_NAME}PatchProcess"
+  $MVN test checkstyle:checkstyle -DskipTests -D${PROJECT_NAME}PatchProcess

  JIRA_COMMENT_FOOTER="Checkstyle results: $BUILD_URL/artifact/trunk/build/test/checkstyle-errors.html
$JIRA_COMMENT_FOOTER"
@@ -520,8 +520,8 @@ checkFindbugsWarnings () {
  echo "======================================================================"
  echo ""
  echo ""
-  echo "$MVN clean compile findbugs:findbugs -D${PROJECT_NAME}PatchProcess" 
-  $MVN clean compile findbugs:findbugs -D${PROJECT_NAME}PatchProcess < /dev/null
+  echo "$MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess" 
+  $MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess < /dev/null

  if [ $? != 0 ] ; then
    JIRA_COMMENT="$JIRA_COMMENT

+ 18 - 6
hadoop-common-project/hadoop-auth/src/examples/pom.xml → hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -18,15 +18,15 @@
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
    <version>0.24.0-SNAPSHOT</version>
-    <relativePath>../hadoop-project</relativePath>
+    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-alfredo-examples</artifactId>
+  <artifactId>hadoop-auth-examples</artifactId>
  <version>0.24.0-SNAPSHOT</version>
  <packaging>war</packaging>

-  <name>Hadoop Alfredo Examples</name>
-  <description>Hadoop Alfredo - Java HTTP SPNEGO Examples</description>
+  <name>Apache Hadoop Auth Examples</name>
+  <description>Apache Hadoop Auth Examples - Java HTTP SPNEGO</description>

  <dependencies>
    <dependency>
@@ -36,7 +36,7 @@
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-alfredo</artifactId>
+      <artifactId>hadoop-auth</artifactId>
      <scope>compile</scope>
    </dependency>
    <dependency>
@@ -53,6 +53,18 @@

  <build>
    <plugins>
+      <plugin>
+        <artifactId>maven-war-plugin</artifactId>
+        <configuration>
+          <warName>hadoop-auth-examples</warName>
+        </configuration>
+      </plugin>
+      <plugin>
+        <artifactId>maven-deploy-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
      <plugin>
        <groupId>org.codehaus.mojo</groupId>
        <artifactId>exec-maven-plugin</artifactId>
@@ -64,7 +76,7 @@
          </execution>
        </executions>
        <configuration>
-          <mainClass>org.apache.hadoop.alfredo.examples.WhoClient</mainClass>
+          <mainClass>org.apache.hadoop.security.authentication.examples.WhoClient</mainClass>
          <arguments>
            <argument>${url}</argument>
          </arguments>

+ 1 - 1
hadoop-common-project/hadoop-auth/src/examples/src/main/java/org/apache/hadoop/alfredo/examples/RequestLoggerFilter.java → hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/RequestLoggerFilter.java

@@ -11,7 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.examples;
+package org.apache.hadoop.security.authentication.examples;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

+ 2 - 2
hadoop-common-project/hadoop-auth/src/examples/src/main/java/org/apache/hadoop/alfredo/examples/WhoClient.java → hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java

@@ -11,9 +11,9 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.examples;
+package org.apache.hadoop.security.authentication.examples;

-import org.apache.hadoop.alfredo.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;

import java.io.BufferedReader;
import java.io.InputStreamReader;

+ 1 - 1
hadoop-common-project/hadoop-auth/src/examples/src/main/java/org/apache/hadoop/alfredo/examples/WhoServlet.java → hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoServlet.java

@@ -11,7 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.examples;
+package org.apache.hadoop.security.authentication.examples;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;

+ 1 - 1
hadoop-common-project/hadoop-auth/src/examples/src/main/resources/log4j.properties → hadoop-common-project/hadoop-auth-examples/src/main/resources/log4j.properties

@@ -16,4 +16,4 @@ log4j.appender.test.Target=System.out
log4j.appender.test.layout=org.apache.log4j.PatternLayout
log4j.appender.test.layout.ConversionPattern=%d{ABSOLUTE} %5p %c{1}:%L - %m%n

-log4j.logger.org.apache.hadoop.alfredo=DEBUG, test
+log4j.logger.org.apache.hadoop.security.authentication=DEBUG, test

+ 5 - 5
hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/WEB-INF/web.xml → hadoop-common-project/hadoop-auth-examples/src/main/webapp/WEB-INF/web.xml

@@ -16,7 +16,7 @@

  <servlet>
    <servlet-name>whoServlet</servlet-name>
-    <servlet-class>org.apache.hadoop.alfredo.examples.WhoServlet</servlet-class>
+    <servlet-class>org.apache.hadoop.security.authentication.examples.WhoServlet</servlet-class>
  </servlet>

  <servlet-mapping>
@@ -36,12 +36,12 @@

  <filter>
    <filter-name>requestLoggerFilter</filter-name>
-    <filter-class>org.apache.hadoop.alfredo.examples.RequestLoggerFilter</filter-class>
+    <filter-class>org.apache.hadoop.security.authentication.examples.RequestLoggerFilter</filter-class>
  </filter>

  <filter>
    <filter-name>anonymousFilter</filter-name>
-    <filter-class>org.apache.hadoop.alfredo.server.AuthenticationFilter</filter-class>
+    <filter-class>org.apache.hadoop.security.authentication.server.AuthenticationFilter</filter-class>
    <init-param>
      <param-name>type</param-name>
      <param-value>simple</param-value>
@@ -58,7 +58,7 @@

  <filter>
    <filter-name>simpleFilter</filter-name>
-    <filter-class>org.apache.hadoop.alfredo.server.AuthenticationFilter</filter-class>
+    <filter-class>org.apache.hadoop.security.authentication.server.AuthenticationFilter</filter-class>
    <init-param>
      <param-name>type</param-name>
      <param-value>simple</param-value>
@@ -75,7 +75,7 @@

  <filter>
    <filter-name>kerberosFilter</filter-name>
-    <filter-class>org.apache.hadoop.alfredo.server.AuthenticationFilter</filter-class>
+    <filter-class>org.apache.hadoop.security.authentication.server.AuthenticationFilter</filter-class>
    <init-param>
      <param-name>type</param-name>
      <param-value>kerberos</param-value>

+ 1 - 1
hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/annonymous/index.html → hadoop-common-project/hadoop-auth-examples/src/main/webapp/annonymous/index.html

@@ -13,6 +13,6 @@
-->
<html>
<body>
-<h1>Hello Hadoop Alfredo Pseudo/Simple Authentication with anonymous users!</h1>
+<h1>Hello Hadoop Auth Pseudo/Simple Authentication with anonymous users!</h1>
</body>
</html>

+ 1 - 1
hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/index.html → hadoop-common-project/hadoop-auth-examples/src/main/webapp/index.html

@@ -13,6 +13,6 @@
-->
<html>
<body>
-<h1>Hello Hadoop Alfredo Examples</h1>
+<h1>Hello Hadoop Auth Examples!</h1>
</body>
</html>

+ 1 - 1
hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/simple/index.html → hadoop-common-project/hadoop-auth-examples/src/main/webapp/kerberos/index.html

@@ -13,6 +13,6 @@
-->
<html>
<body>
-<h1>Hello Hadoop Alfredo Pseudo/Simple Authentication!</h1>
+<h1>Hello Hadoop Auth Kerberos SPNEGO Authentication!</h1>
</body>
</html>

+ 1 - 1
hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/kerberos/index.html → hadoop-common-project/hadoop-auth-examples/src/main/webapp/simple/index.html

@@ -13,6 +13,6 @@
-->
<html>
<body>
-<h1>Hello Hadoop Alfredo Kerberos SPNEGO Authentication!</h1>
+<h1>Hello Hadoop Auth Pseudo/Simple Authentication!</h1>
</body>
</html>

+ 6 - 6
hadoop-common-project/hadoop-auth/BUILDING.txt

@@ -1,20 +1,20 @@

-Build instructions for Hadoop Alfredo
+Build instructions for Hadoop Auth

Same as for Hadoop.

-For more details refer to the Alfredo documentation pages.
+For more details refer to the Hadoop Auth documentation pages.

-----------------------------------------------------------------------------
Caveats:

-* Alfredo has profile to enable Kerberos testcases (testKerberos)
+* Hadoop Auth has profile to enable Kerberos testcases (testKerberos)

  To run Kerberos testcases a KDC, 2 kerberos principals and a keytab file
-  are required (refer to the Alfredo documentation pages for details).
+  are required (refer to the Hadoop Auth documentation pages for details).

-* Alfredo does not have a distribution profile (dist)
+* Hadoop Auth does not have a distribution profile (dist)

-* Alfredo does not have a native code profile (native)
+* Hadoop Auth does not have a native code profile (native)

-----------------------------------------------------------------------------

+ 3 - 3
hadoop-common-project/hadoop-auth/README.txt

@@ -1,6 +1,6 @@
-Hadoop Alfredo, Java HTTP SPNEGO
+Hadoop Auth, Java HTTP SPNEGO

-Hadoop Alfredo is a Java library consisting of a client and a server
+Hadoop Auth is a Java library consisting of a client and a server
components to enable Kerberos SPNEGO authentication for HTTP.

The client component is the AuthenticatedURL class.
@@ -10,6 +10,6 @@ The server component is the AuthenticationFilter servlet filter class.
Authentication mechanisms support is pluggable in both the client and
the server components via interfaces.

-In addition to Kerberos SPNEGO, Alfredo also supports Pseudo/Simple
+In addition to Kerberos SPNEGO, Hadoop Auth also supports Pseudo/Simple
authentication (trusting the value of the query string parameter
'user.name').

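Note: the Pseudo/Simple mechanism the README above describes trusts the 'user.name' query string parameter, so it can be exercised with nothing more than java.net.HttpURLConnection. A minimal sketch, assuming the hadoop-auth-examples webapp is deployed locally as in Examples.apt.vm further down (the endpoint and port are that example's, not anything this commit mandates):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class SimpleAuthWho {
      public static void main(String[] args) throws Exception {
        // 'user.name' is trusted as-is by the Pseudo/Simple mechanism the README describes.
        URL url = new URL("http://localhost:8080/hadoop-auth-examples/simple/who?user.name=foo");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        System.out.println("HTTP status: " + conn.getResponseCode());
        // On success the server also returns a signed cookie that later requests
        // could replay instead of resending user.name.
        System.out.println("Set-Cookie: " + conn.getHeaderField("Set-Cookie"));
        try (BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
          for (String line; (line = in.readLine()) != null; ) {
            System.out.println(line);
          }
        }
      }
    }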
+ 3 - 4
hadoop-common-project/hadoop-auth/pom.xml

@@ -21,13 +21,12 @@
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-alfredo</artifactId>
+  <artifactId>hadoop-auth</artifactId>
  <version>0.24.0-SNAPSHOT</version>
  <packaging>jar</packaging>

-  <name>Apache Hadoop Alfredo</name>
-  <description>Apache Hadoop Alfredo - Java HTTP SPNEGO</description>
-  <url>http://hadoop.apache.org/alfredo</url>
+  <name>Apache Hadoop Auth</name>
+  <description>Apache Hadoop Auth - Java HTTP SPNEGO</description>

  <properties>
    <maven.build.timestamp.format>yyyyMMdd</maven.build.timestamp.format>

+ 3 - 3
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticatedURL.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java

@@ -11,9 +11,9 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;

-import org.apache.hadoop.alfredo.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;

import java.io.IOException;
import java.net.HttpURLConnection;
@@ -63,7 +63,7 @@ public class AuthenticatedURL {
  /**
   * Name of the HTTP cookie used for the authentication token between the client and the server.
   */
-  public static final String AUTH_COOKIE = "alfredo.auth";
+  public static final String AUTH_COOKIE = "hadoop.auth";

  private static final String AUTH_COOKIE_EQ = AUTH_COOKIE + "=";


+ 1 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticationException.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticationException.java

@@ -11,7 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;

/**
 * Exception thrown when an authentication error occurrs.

+ 1 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/Authenticator.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java

@@ -11,7 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;


import java.io.IOException;

+ 4 - 4
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/KerberosAuthenticator.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java

@@ -11,7 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;

import com.sun.security.auth.module.Krb5LoginModule;
import org.apache.commons.codec.binary.Base64;
@@ -48,17 +48,17 @@ public class KerberosAuthenticator implements Authenticator {
  /**
   * HTTP header used by the SPNEGO server endpoint during an authentication sequence.
   */
-  public static String WWW_AUTHENTICATE = "WWW-Authenticate";
+  public static final String WWW_AUTHENTICATE = "WWW-Authenticate";

  /**
   * HTTP header used by the SPNEGO client endpoint during an authentication sequence.
   */
-  public static String AUTHORIZATION = "Authorization";
+  public static final String AUTHORIZATION = "Authorization";

  /**
   * HTTP header prefix used by the SPNEGO client/server endpoints during an authentication sequence.
   */
-  public static String NEGOTIATE = "Negotiate";
+  public static final String NEGOTIATE = "Negotiate";

  private static final String AUTH_HTTP_METHOD = "OPTIONS";


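Note: the constants made final above name the headers of a standard HTTP SPNEGO exchange (the server challenges with WWW-Authenticate: Negotiate, the client answers with Authorization: Negotiate followed by a base64 GSS token), with AUTH_HTTP_METHOD presumably used for the initial authentication request. A small probe sketch, assuming the example webapp endpoint from Examples.apt.vm; it only inspects the challenge and does not perform the GSS step:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class SpnegoHeaderProbe {
      public static void main(String[] args) throws Exception {
        // Assumed endpoint from the hadoop-auth-examples webapp (see Examples.apt.vm below).
        URL url = new URL("http://localhost:8080/hadoop-auth-examples/kerberos/who");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("OPTIONS"); // mirrors AUTH_HTTP_METHOD above
        // Without credentials the server answers 401 with "WWW-Authenticate: Negotiate";
        // a real client would retry with "Authorization: Negotiate <base64 GSS token>".
        System.out.println("Status: " + conn.getResponseCode());
        System.out.println("WWW-Authenticate: " + conn.getHeaderField("WWW-Authenticate"));
      }
    }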
+ 1 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/PseudoAuthenticator.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java

@@ -11,7 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;

import java.io.IOException;
import java.net.HttpURLConnection;

+ 11 - 9
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationFilter.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java

@@ -11,12 +11,12 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;

-import org.apache.hadoop.alfredo.client.AuthenticatedURL;
-import org.apache.hadoop.alfredo.client.AuthenticationException;
-import org.apache.hadoop.alfredo.util.Signer;
-import org.apache.hadoop.alfredo.util.SignerException;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.util.Signer;
+import org.apache.hadoop.security.authentication.util.SignerException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -103,6 +103,8 @@ public class AuthenticationFilter implements Filter {
   */
  public static final String COOKIE_PATH = "cookie.path";

+  private static final Random RAN = new Random();
+
  private Signer signer;
  private AuthenticationHandler authHandler;
  private boolean randomSecret;
@@ -139,7 +141,7 @@ public class AuthenticationFilter implements Filter {
    }

    try {
-      Class klass = Thread.currentThread().getContextClassLoader().loadClass(authHandlerClassName);
+      Class<?> klass = Thread.currentThread().getContextClassLoader().loadClass(authHandlerClassName);
      authHandler = (AuthenticationHandler) klass.newInstance();
      authHandler.init(config);
    } catch (ClassNotFoundException ex) {
@@ -151,7 +153,7 @@ public class AuthenticationFilter implements Filter {
    }
    String signatureSecret = config.getProperty(configPrefix + SIGNATURE_SECRET);
    if (signatureSecret == null) {
-      signatureSecret = Long.toString(new Random(System.currentTimeMillis()).nextLong());
+      signatureSecret = Long.toString(RAN.nextLong());
      randomSecret = true;
      LOG.warn("'signature.secret' configuration not set, using a random value as secret");
    }
@@ -237,7 +239,7 @@ public class AuthenticationFilter implements Filter {
   */
  protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) throws ServletException {
    Properties props = new Properties();
-    Enumeration names = filterConfig.getInitParameterNames();
+    Enumeration<?> names = filterConfig.getInitParameterNames();
    while (names.hasMoreElements()) {
      String name = (String) names.nextElement();
      if (name.startsWith(configPrefix)) {
@@ -381,7 +383,7 @@ public class AuthenticationFilter implements Filter {
  }

  /**
-   * Creates the Alfredo authentiation HTTP cookie.
+   * Creates the Hadoop authentiation HTTP cookie.
   * <p/>
   * It sets the domain and path specified in the configuration.
   *

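Note on the signature-secret change above: seeding a fresh Random with System.currentTimeMillis() on every filter init means two filters initialized in the same millisecond derive the same fallback secret, so the diff switches to one shared Random for the class. A standalone sketch of the two approaches (illustrative only, not code from the commit):

    import java.util.Random;

    public class SecretSeeding {
      // Old approach: a Random seeded from the wall clock at each filter init; two
      // filters initialized in the same millisecond derive the same fallback secret.
      static String oldStyleSecret() {
        return Long.toString(new Random(System.currentTimeMillis()).nextLong());
      }

      // New approach, as in the hunk above: one shared Random for the class, so
      // repeated inits keep drawing from the same stream instead of re-seeding.
      private static final Random RAN = new Random();

      static String newStyleSecret() {
        return Long.toString(RAN.nextLong());
      }

      public static void main(String[] args) {
        System.out.println("old-style: " + oldStyleSecret());
        System.out.println("new-style: " + newStyleSecret());
      }
    }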
+ 2 - 2
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationHandler.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java

@@ -11,9 +11,9 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;

-import org.apache.hadoop.alfredo.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;

+ 2 - 2
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationToken.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java

@@ -11,9 +11,9 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;

-import org.apache.hadoop.alfredo.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;

import java.security.Principal;
import java.util.Arrays;

+ 4 - 4
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/KerberosAuthenticationHandler.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java

@@ -11,13 +11,13 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;

-import org.apache.hadoop.alfredo.client.AuthenticationException;
-import org.apache.hadoop.alfredo.client.KerberosAuthenticator;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import com.sun.security.auth.module.Krb5LoginModule;
import org.apache.commons.codec.binary.Base64;
-import org.apache.hadoop.alfredo.util.KerberosName;
+import org.apache.hadoop.security.authentication.util.KerberosName;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSManager;

+ 3 - 3
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/PseudoAuthenticationHandler.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java

@@ -11,10 +11,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;

-import org.apache.hadoop.alfredo.client.AuthenticationException;
-import org.apache.hadoop.alfredo.client.PseudoAuthenticator;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;

+ 1 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/KerberosName.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java

@@ -1,4 +1,4 @@
-package org.apache.hadoop.alfredo.util;
+package org.apache.hadoop.security.authentication.util;

/**
 * Licensed to the Apache Software Foundation (ASF) under one

+ 1 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/Signer.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java

@@ -11,7 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.util;
+package org.apache.hadoop.security.authentication.util;

import org.apache.commons.codec.binary.Base64;


+ 1 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/SignerException.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerException.java

@@ -11,7 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License. See accompanying LICENSE file.
 */
-package org.apache.hadoop.alfredo.util;
+package org.apache.hadoop.security.authentication.util;

/**
 * Exception thrown by {@link Signer} when a string signature is invalid.

+ 7 - 7
hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm

@@ -11,12 +11,12 @@
~~ limitations under the License. See accompanying LICENSE file.

  ---
-  Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Building It
+  Hadoop Auth, Java HTTP SPNEGO ${project.version} - Building It
  ---
  ---
  ${maven.build.timestamp}

-Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Building It
+Hadoop Auth, Java HTTP SPNEGO ${project.version} - Building It

  \[ {{{./index.html}Go Back}} \]

@@ -50,14 +50,14 @@ $ mvn test -PtestKerberos
  The following Maven <<<-D>>> options can be used to change the default
  values:

-  * <<<alfredo.test.kerberos.realm>>>: default value <<LOCALHOST>>
+  * <<<hadoop-auth.test.kerberos.realm>>>: default value <<LOCALHOST>>

-  * <<<alfredo.test.kerberos.client.principal>>>: default value <<client>>
+  * <<<hadoop-auth.test.kerberos.client.principal>>>: default value <<client>>

-  * <<<alfredo.test.kerberos.server.principal>>>: default value
+  * <<<hadoop-auth.test.kerberos.server.principal>>>: default value
    <<HTTP/localhost>> (it must start 'HTTP/')

-  * <<<alfredo.test.kerberos.keytab.file>>>: default value
+  * <<<hadoop-auth.test.kerberos.keytab.file>>>: default value
    <<${HOME}/${USER}.keytab>>

** Generating Documentation
@@ -69,7 +69,7 @@ $ mvn package -Pdocs
+---+

  The generated documentation is available at
-  <<<hadoop-alfredo/target/site/>>>.
+  <<<hadoop-auth/target/site/>>>.

  \[ {{{./index.html}Go Back}} \]


+ 13 - 13
hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm

@@ -11,30 +11,30 @@
~~ limitations under the License. See accompanying LICENSE file.

  ---
-  Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Server Side
+  Hadoop Auth, Java HTTP SPNEGO ${project.version} - Server Side
  Configuration
  ---
  ---
  ${maven.build.timestamp}

-Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Server Side
+Hadoop Auth, Java HTTP SPNEGO ${project.version} - Server Side
Configuration

  \[ {{{./index.html}Go Back}} \]

* Server Side Configuration Setup

-  The {{{./apidocs/org/apache/hadoop/alfredo/server/AuthenticationFilter.html}
-  AuthenticationFilter filter}} is Alfredo's server side component.
+  The {{{./apidocs/org/apache/hadoop/auth/server/AuthenticationFilter.html}
+  AuthenticationFilter filter}} is Hadoop Auth's server side component.

  This filter must be configured in front of all the web application resources
  that required authenticated requests. For example:

-  The Alfredo and dependent JAR files must be in the web application classpath
-  (commonly the <<<WEB-INF/lib>>> directory).
+  The Hadoop Auth and dependent JAR files must be in the web application
+  classpath (commonly the <<<WEB-INF/lib>>> directory).

-  Alfredo uses SLF4J-API for logging. Alfredo Maven POM dependencies define the
-  SLF4J API dependency but it does not define the dependency on a concrete
+  Hadoop Auth uses SLF4J-API for logging. Auth Maven POM dependencies define
+  the SLF4J API dependency but it does not define the dependency on a concrete
  logging implementation, this must be addded explicitly to the web
  application. For example, if the web applicationan uses Log4j, the
  SLF4J-LOG4J12 and LOG4J jar files must be part part of the web application
@@ -47,7 +47,7 @@ Configuration

  * <<<[PREFIX.]type>>>: the authentication type keyword (<<<simple>>> or
    <<<kerberos>>>) or a
-    {{{./apidocs/org/apache/hadoop/alfredo/server/AuthenticationHandler.html}
+    {{{./apidocs/org/apache/hadoop/auth/server/AuthenticationHandler.html}
    Authentication handler implementation}}.

  * <<<[PREFIX.]signature.secret>>>: The secret to SHA-sign the generated
@@ -80,7 +80,7 @@ Configuration

    * <<<[PREFIX.]kerberos.keytab>>>: The path to the keytab file containing
      the credentials for the kerberos principal. For example:
-      <<</Users/tucu/alfredo.keytab>>>. There is no default value.
+      <<</Users/tucu/tucu.keytab>>>. There is no default value.

  <<Example>>:

@@ -90,7 +90,7 @@ Configuration

    <filter>
        <filter-name>kerberosFilter</filter-name>
-        <filter-class>org.apache.hadoop.alfredo.server.AuthenticationFilter</filter-class>
+        <filter-class>org.apache.hadoop.security.auth.server.AuthenticationFilter</filter-class>
        <init-param>
            <param-name>type</param-name>
            <param-value>kerberos</param-value>
@@ -113,7 +113,7 @@ Configuration
        </init-param>
        <init-param>
            <param-name>kerberos.keytab</param-name>
-            <param-value>/tmp/alfredo.keytab</param-value>
+            <param-value>/tmp/auth.keytab</param-value>
        </init-param>
    </filter>

@@ -146,7 +146,7 @@ Configuration

    <filter>
        <filter-name>simpleFilter</filter-name>
-        <filter-class>org.apache.hadoop.alfredo.server.AuthenticationFilter</filter-class>
+        <filter-class>org.apache.hadoop.security.auth.server.AuthenticationFilter</filter-class>
        <init-param>
            <param-name>type</param-name>
            <param-value>simple</param-value>

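Note: the configuration keys documented above reach the filter through the protected getConfiguration(String, FilterConfig) hook visible in the AuthenticationFilter hunk earlier in this diff, so a deployment can also supply them from code rather than web.xml. A hypothetical subclass sketch (the class and property names are the ones shown in this commit; the subclass itself is illustrative):

    import java.util.Properties;
    import javax.servlet.FilterConfig;
    import javax.servlet.ServletException;
    import org.apache.hadoop.security.authentication.server.AuthenticationFilter;

    // Hypothetical subclass: supplies the documented properties from code.
    public class ProgrammaticAuthFilter extends AuthenticationFilter {
      @Override
      protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig)
          throws ServletException {
        Properties props = new Properties();
        props.setProperty("type", "simple");               // [PREFIX.]type
        props.setProperty("signature.secret", "a-secret"); // [PREFIX.]signature.secret
        props.setProperty("cookie.path", "/");             // COOKIE_PATH, see the filter hunk
        return props;
      }
    }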
+ 13 - 13
hadoop-common-project/hadoop-auth/src/site/apt/Examples.apt.vm

@@ -11,16 +11,16 @@
 ~~ limitations under the License. See accompanying LICENSE file.
 ~~ limitations under the License. See accompanying LICENSE file.
 
 
   ---
   ---
-  Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Examples
+  Hadoop Auth, Java HTTP SPNEGO ${project.version} - Examples
   ---
   ---
   ---
   ---
   ${maven.build.timestamp}
   ${maven.build.timestamp}
 
 
-Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Examples
+Hadoop Auth, Java HTTP SPNEGO ${project.version} - Examples
 
 
   \[ {{{./index.html}Go Back}} \]
   \[ {{{./index.html}Go Back}} \]
 
 
-* Accessing a Alfredo protected URL Using a browser
+* Accessing a Hadoop Auth protected URL Using a browser
 
 
   <<IMPORTANT:>> The browser must support HTTP Kerberos SPNEGO. For example,
   <<IMPORTANT:>> The browser must support HTTP Kerberos SPNEGO. For example,
   Firefox or Internet Explorer.
   Firefox or Internet Explorer.
@@ -31,7 +31,7 @@ Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Examples
   the domain of the web server that is HTTP Kerberos SPNEGO protected (if using
   the domain of the web server that is HTTP Kerberos SPNEGO protected (if using
   multiple domains and hostname use comma to separate them).
   multiple domains and hostname use comma to separate them).
   
   
-* Accessing a Alfredo protected URL Using <<<curl>>>
+* Accessing a Hadoop Auth protected URL Using <<<curl>>>
 
 
   <<IMPORTANT:>> The <<<curl>>> version must support GSS, run <<<curl -V>>>.
   <<IMPORTANT:>> The <<<curl>>> version must support GSS, run <<<curl -V>>>.
 
 
@@ -48,10 +48,10 @@ Features: GSS-Negotiate IPv6 Largefile NTLM SSL libz
 +---+
 +---+
 $ kinit
 $ kinit
 Please enter the password for tucu@LOCALHOST:
 Please enter the password for tucu@LOCALHOST:
-$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/alfredo-examples/kerberos/who
+$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/hadoop-auth-examples/kerberos/who
 Enter host password for user 'tucu':
 Enter host password for user 'tucu':
 
 
-Hello Alfredo!
+Hello Hadoop Auth Examples!
 +---+
 +---+
 
 
   * The <<<--negotiate>>> option enables SPNEGO in <<<curl>>>.
   * The <<<--negotiate>>> option enables SPNEGO in <<<curl>>>.
@@ -68,7 +68,7 @@ Hello Alfredo!
 
 
 +---+
 +---+
 ...
 ...
-URL url = new URL("http://localhost:8080/alfredo/kerberos/who");
+URL url = new URL("http://localhost:8080/hadoop-auth/kerberos/who");
 AuthenticatedURL.Token token = new AuthenticatedURL.Token();
 AuthenticatedURL.Token token = new AuthenticatedURL.Token();
 ...
 ...
 HttpURLConnection conn = new AuthenticatedURL(url, token).openConnection();
 HttpURLConnection conn = new AuthenticatedURL(url, token).openConnection();
@@ -79,12 +79,12 @@ conn = new AuthenticatedURL(url, token).openConnection();
 
 
 * Building and Running the Examples
 * Building and Running the Examples
 
 
-  Download Alfredo's source code, the examples are in the
+  Download Hadoop-Auth's source code, the examples are in the
   <<<src/main/examples>>> directory.
   <<<src/main/examples>>> directory.
 
 
 ** Server Example:
 ** Server Example:
 
 
-  Edit the <<<src/main/examples/src/main/webapp/WEB-INF/web.xml>>> and set the
+  Edit the <<<hadoop-auth-examples/src/main/webapp/WEB-INF/web.xml>>> and set the
   right configuration init parameters for the <<<AuthenticationFilter>>>
   right configuration init parameters for the <<<AuthenticationFilter>>>
   definition configured for Kerberos (the right Kerberos principal and keytab
   definition configured for Kerberos (the right Kerberos principal and keytab
   file must be specified). Refer to the {{{./Configuration.html}Configuration
   file must be specified). Refer to the {{{./Configuration.html}Configuration
@@ -106,11 +106,11 @@ conn = new AuthenticatedURL(url, token).openConnection();
 $ kinit
 $ kinit
 Please enter the password for tucu@LOCALHOST:
 Please enter the password for tucu@LOCALHOST:
 
 
-$ curl http://localhost:8080/alfredo-examples/anonymous/who
+$ curl http://localhost:8080/hadoop-auth-examples/anonymous/who
 
 
-$ curl http://localhost:8080/alfredo-examples/simple/who?user.name=foo
+$ curl http://localhost:8080/hadoop-auth-examples/simple/who?user.name=foo
 
 
-$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/alfredo-examples/kerberos/who
+$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/hadoop-auth-examples/kerberos/who
 +---+
 +---+
 
 
 ** Accessing the server using the Java client example
 ** Accessing the server using the Java client example
@@ -121,7 +121,7 @@ Please enter the password for tucu@LOCALHOST:
 
 
 $ cd examples
 $ cd examples
 
 
-$ mvn exec:java -Durl=http://localhost:8080/alfredo-examples/kerberos/who
+$ mvn exec:java -Durl=http://localhost:8080/hadoop-auth-examples/kerberos/who
 
 
 ....
 ....
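
  The Java client flow shown in the hunks above can be rounded out as follows.
  This is a minimal, hedged sketch, not part of the commit: it assumes
  AuthenticatedURL exposes a no-arg constructor and an openConnection(URL, Token)
  method (the inline snippet earlier in this file abbreviates the call), and the
  URL and response handling are purely illustrative.

+---+
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;

public class WhoClientSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8080/hadoop-auth-examples/kerberos/who");
    // The token is populated by the first (SPNEGO) exchange and reused afterwards.
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    AuthenticatedURL client = new AuthenticatedURL();

    HttpURLConnection conn = client.openConnection(url, token);
    System.out.println("status: " + conn.getResponseCode());
    BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()));
    for (String line; (line = in.readLine()) != null; ) {
      System.out.println(line);
    }
    in.close();

    // A second request with the same token skips the SPNEGO round trip.
    conn = client.openConnection(url, token);
    System.out.println("status: " + conn.getResponseCode());
  }
}
+---+
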
 
 

+ 7 - 7
hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm

@@ -11,27 +11,27 @@
 ~~ limitations under the License. See accompanying LICENSE file.
 ~~ limitations under the License. See accompanying LICENSE file.
 
 
   ---
   ---
-  Hadoop Alfredo, Java HTTP SPNEGO ${project.version}
+  Hadoop Auth, Java HTTP SPNEGO ${project.version}
   ---
   ---
   ---
   ---
   ${maven.build.timestamp}
   ${maven.build.timestamp}
 
 
-Hadoop Alfredo, Java HTTP SPNEGO ${project.version}
+Hadoop Auth, Java HTTP SPNEGO ${project.version}
 
 
-  Hadoop Alfredo is a Java library consisting of a client and a server
+  Hadoop Auth is a Java library consisting of a client and a server
   components to enable Kerberos SPNEGO authentication for HTTP.
   components to enable Kerberos SPNEGO authentication for HTTP.
 
 
-  Alfredo also supports additional authentication mechanisms on the client
+  Hadoop Auth also supports additional authentication mechanisms on the client
   and the server side via 2 simple interfaces.
   and the server side via 2 simple interfaces.
 
 
 * License
 * License
 
 
-  Alfredo is distributed under {{{http://www.apache.org/licenses/}Apache
+  Hadoop Auth is distributed under {{{http://www.apache.org/licenses/}Apache
   License 2.0}}.
   License 2.0}}.
 
 
-* How Does Alfredo Works?
+* How Does Hadoop Auth Work?
 
 
-  Alfredo enforces authentication on protected resources, once authentiation
+  Hadoop Auth enforces authentication on protected resources, once authentication
   has been established it sets a signed HTTP Cookie that contains an
   has been established it sets a signed HTTP Cookie that contains an
   authentication token with the user name, user principal, authentication type
   authentication token with the user name, user principal, authentication type
   and expiration time.
   and expiration time.
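
  A practical consequence of the filter model described above is that servlets
  sitting behind the filter can read the authenticated caller through the
  standard Servlet API. The sketch below is a hedged illustration (the servlet
  name and response format are made up); it relies only on the fact that the
  filter validates the signed cookie before the request reaches the servlet.

+---+
import java.io.IOException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

// Runs behind AuthenticationFilter: by the time this servlet is invoked the
// signed authentication cookie has been validated, so the request reports the
// authenticated identity via the standard accessors.
public class WhoAmIServlet extends HttpServlet {
  @Override
  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws IOException {
    resp.setContentType("text/plain");
    resp.getWriter().println("user: " + req.getRemoteUser());
    resp.getWriter().println("principal: " + req.getUserPrincipal());
  }
}
+---+
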

+ 1 - 1
hadoop-common-project/hadoop-auth/src/site/site.xml

@@ -11,7 +11,7 @@
  See the License for the specific language governing permissions and
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
  limitations under the License. See accompanying LICENSE file.
 -->
 -->
-<project name="Hadoop Alfredo">
+<project name="Hadoop Auth">
 
 
   <version position="right"/>
   <version position="right"/>
 
 

+ 2 - 2
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/KerberosTestUtils.java → hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java

@@ -11,7 +11,7 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License. See accompanying LICENSE file.
  * limitations under the License. See accompanying LICENSE file.
  */
  */
-package org.apache.hadoop.alfredo;
+package org.apache.hadoop.security.authentication;
 
 
 import com.sun.security.auth.module.Krb5LoginModule;
 import com.sun.security.auth.module.Krb5LoginModule;
 
 
@@ -34,7 +34,7 @@ import java.util.concurrent.Callable;
  * Test helper class for Java Kerberos setup.
  * Test helper class for Java Kerberos setup.
  */
  */
 public class KerberosTestUtils {
 public class KerberosTestUtils {
-  private static final String PREFIX = "alfredo.test.";
+  private static final String PREFIX = "hadoop-auth.test.";
 
 
   public static final String REALM = PREFIX + "kerberos.realm";
   public static final String REALM = PREFIX + "kerberos.realm";
 
 

+ 3 - 2
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/AuthenticatorTestCase.java → hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java

@@ -11,9 +11,9 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License. See accompanying LICENSE file.
  * limitations under the License. See accompanying LICENSE file.
  */
  */
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;
 
 
-import org.apache.hadoop.alfredo.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import junit.framework.TestCase;
 import junit.framework.TestCase;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.servlet.Context;
 import org.mortbay.jetty.servlet.Context;
@@ -57,6 +57,7 @@ public abstract class AuthenticatorTestCase extends TestCase {
     }
     }
   }
   }
 
 
+  @SuppressWarnings("serial")
   public static class TestServlet extends HttpServlet {
   public static class TestServlet extends HttpServlet {
 
 
     @Override
     @Override

+ 1 - 1
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestAuthenticatedURL.java → hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java

@@ -11,7 +11,7 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License. See accompanying LICENSE file.
  * limitations under the License. See accompanying LICENSE file.
  */
  */
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;
 
 
 import junit.framework.TestCase;
 import junit.framework.TestCase;
 import org.mockito.Mockito;
 import org.mockito.Mockito;

+ 5 - 5
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestKerberosAuthenticator.java → hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java

@@ -11,12 +11,12 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License. See accompanying LICENSE file.
  * limitations under the License. See accompanying LICENSE file.
  */
  */
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;
 
 
-import org.apache.hadoop.alfredo.KerberosTestUtils;
-import org.apache.hadoop.alfredo.server.AuthenticationFilter;
-import org.apache.hadoop.alfredo.server.PseudoAuthenticationHandler;
-import org.apache.hadoop.alfredo.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.authentication.KerberosTestUtils;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 
 
 import java.net.HttpURLConnection;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.net.URL;

+ 3 - 3
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestPseudoAuthenticator.java → hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java

@@ -11,10 +11,10 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License. See accompanying LICENSE file.
  * limitations under the License. See accompanying LICENSE file.
  */
  */
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;
 
 
-import org.apache.hadoop.alfredo.server.AuthenticationFilter;
-import org.apache.hadoop.alfredo.server.PseudoAuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
 
 
 import java.net.HttpURLConnection;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.net.URL;

+ 4 - 4
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationFilter.java → hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java

@@ -11,11 +11,11 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License. See accompanying LICENSE file.
  * limitations under the License. See accompanying LICENSE file.
  */
  */
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;
 
 
-import org.apache.hadoop.alfredo.client.AuthenticatedURL;
-import org.apache.hadoop.alfredo.client.AuthenticationException;
-import org.apache.hadoop.alfredo.util.Signer;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.util.Signer;
 import junit.framework.TestCase;
 import junit.framework.TestCase;
 import org.mockito.Mockito;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.invocation.InvocationOnMock;

+ 2 - 2
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationToken.java → hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java

@@ -11,9 +11,9 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License. See accompanying LICENSE file.
  * limitations under the License. See accompanying LICENSE file.
  */
  */
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;
 
 
-import org.apache.hadoop.alfredo.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import junit.framework.TestCase;
 import junit.framework.TestCase;
 
 
 public class TestAuthenticationToken extends TestCase {
 public class TestAuthenticationToken extends TestCase {

+ 4 - 4
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestKerberosAuthenticationHandler.java → hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java

@@ -11,11 +11,11 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License. See accompanying LICENSE file.
  * limitations under the License. See accompanying LICENSE file.
  */
  */
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;
 
 
-import org.apache.hadoop.alfredo.KerberosTestUtils;
-import org.apache.hadoop.alfredo.client.AuthenticationException;
-import org.apache.hadoop.alfredo.client.KerberosAuthenticator;
+import org.apache.hadoop.security.authentication.KerberosTestUtils;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import junit.framework.TestCase;
 import junit.framework.TestCase;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.codec.binary.Base64;
 import org.ietf.jgss.GSSContext;
 import org.ietf.jgss.GSSContext;

+ 3 - 3
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestPseudoAuthenticationHandler.java → hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java

@@ -11,11 +11,11 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License. See accompanying LICENSE file.
  * limitations under the License. See accompanying LICENSE file.
  */
  */
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;
 
 
-import org.apache.hadoop.alfredo.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import junit.framework.TestCase;
 import junit.framework.TestCase;
-import org.apache.hadoop.alfredo.client.PseudoAuthenticator;
+import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
 import org.mockito.Mockito;
 import org.mockito.Mockito;
 
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequest;

+ 2 - 2
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestKerberosName.java → hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java

@@ -1,4 +1,4 @@
-package org.apache.hadoop.alfredo.util;
+package org.apache.hadoop.security.authentication.util;
 
 
 /**
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * Licensed to the Apache Software Foundation (ASF) under one
@@ -20,7 +20,7 @@ package org.apache.hadoop.alfredo.util;
 
 
 import java.io.IOException;
 import java.io.IOException;
 
 
-import org.apache.hadoop.alfredo.KerberosTestUtils;
+import org.apache.hadoop.security.authentication.KerberosTestUtils;
 import org.junit.Before;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.Test;
 import static org.junit.Assert.*;
 import static org.junit.Assert.*;

+ 1 - 1
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestSigner.java → hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java

@@ -11,7 +11,7 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License. See accompanying LICENSE file.
  * limitations under the License. See accompanying LICENSE file.
  */
  */
-package org.apache.hadoop.alfredo.util;
+package org.apache.hadoop.security.authentication.util;
 
 
 import junit.framework.TestCase;
 import junit.framework.TestCase;
 
 

+ 30 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -2,6 +2,16 @@ Hadoop Change Log
 
 
 Trunk (unreleased changes)
 Trunk (unreleased changes)
 
 
+  IMPROVEMENTS
+
+    HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
+    HADOOP-7524. Change RPC to allow multiple protocols including multiple versions of the same protocol. (Sanjay Radia)
+
+  BUGS
+
+    HADOOP-7606. Upgrade Jackson to version 1.7.1 to match the version required
+                 by Jersey (Alejandro Abdelnur via atm)
+
 Release 0.23.0 - Unreleased
 Release 0.23.0 - Unreleased
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
@@ -347,6 +357,20 @@ Release 0.23.0 - Unreleased
     HADOOP-7547. Add generic type in WritableComparable subclasses.
     HADOOP-7547. Add generic type in WritableComparable subclasses.
     (Uma Maheswara Rao G via szetszwo)
     (Uma Maheswara Rao G via szetszwo)
 
 
+    HADOOP-7579. Rename package names from alfredo to auth.
+    (Alejandro Abdelnur via szetszwo)
+
+    HADOOP-7594. Support HTTP REST in HttpServer.  (szetszwo)
+
+    HADOOP-7552. FileUtil#fullyDelete doesn't throw IOE but lists it
+    in the throws clause. (eli)
+
+    HADOOP-7580. Add a version of getLocalPathForWrite to LocalDirAllocator
+    which doesn't create dirs. (Chris Douglas & Siddharth Seth via acmurthy) 
+
+    HADOOP-7507. Allow ganglia metrics to include the metrics system tags
+                 in the gmetric names. (Alejandro Abdelnur via todd)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
   
   
     HADOOP-7333. Performance improvement in PureJavaCrc32. (Eric Caspole
     HADOOP-7333. Performance improvement in PureJavaCrc32. (Eric Caspole
@@ -533,6 +557,12 @@ Release 0.23.0 - Unreleased
     HADOOP-7560. Change src layout to be heirarchical. (Alejandro Abdelnur
     HADOOP-7560. Change src layout to be heirarchical. (Alejandro Abdelnur
     via acmurthy)
     via acmurthy)
 
 
+    HADOOP-7576. Fix findbugs warnings and javac warnings in hadoop-auth.
+    (szetszwo)
+
+    HADOOP-7593. Fix AssertionError in TestHttpServer.testMaxThreads().
+    (Uma Maheswara Rao G via szetszwo)
+
 Release 0.22.0 - Unreleased
 Release 0.22.0 - Unreleased
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES

+ 23 - 1
hadoop-common-project/hadoop-common/pom.xml

@@ -92,6 +92,28 @@
       <artifactId>jetty-util</artifactId>
       <artifactId>jetty-util</artifactId>
       <scope>compile</scope>
       <scope>compile</scope>
     </dependency>
     </dependency>
+
+    <dependency>
+      <groupId>asm</groupId>
+      <artifactId>asm</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-core</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-json</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-server</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
     <dependency>
     <dependency>
       <groupId>tomcat</groupId>
       <groupId>tomcat</groupId>
       <artifactId>jasper-compiler</artifactId>
       <artifactId>jasper-compiler</artifactId>
@@ -239,7 +261,7 @@
     </dependency>
     </dependency>
     <dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-alfredo</artifactId>
+      <artifactId>hadoop-auth</artifactId>
       <scope>compile</scope>
       <scope>compile</scope>
     </dependency>
     </dependency>
   </dependencies>
   </dependencies>

+ 10 - 0
hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties

@@ -43,6 +43,16 @@
 #*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
 #*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
 #*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
 #*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
 
 
+# Tag values to use for the ganglia prefix. If not defined no tags are used.
+# If '*', all tags are used. If specifying multiple tags, separate them with 
+# commas. Note that the last segment of the property name is the context name.
+#
+#*.sink.ganglia.tagsForPrefix.jvm=ProcesName
+#*.sink.ganglia.tagsForPrefix.dfs=
+#*.sink.ganglia.tagsForPrefix.rpc=
+#*.sink.ganglia.tagsForPrefix.mapred=
+#*.sink.ganglia.tagsForPrefix.fairscheduler=
+
 #namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
 #namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
 
 
 #datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
 #datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649

+ 22 - 0
hadoop-common-project/hadoop-common/src/main/conf/log4j.properties

@@ -149,3 +149,25 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
 #log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
 #log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
 #log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
 #log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
 #log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 #log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Yarn ResourceManager Application Summary Log 
+#
+# Set the ResourceManager summary log filename
+#yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
+# Set the ResourceManager summary log level and appender
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# Appender for ResourceManager Application Summary Log - rolled daily
+# Requires the following properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+
+#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd

+ 2 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java

@@ -28,7 +28,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 
 
@@ -88,7 +87,7 @@ public class FileUtil {
    * (4) If dir is a normal directory, then dir and all its contents recursively
    * (4) If dir is a normal directory, then dir and all its contents recursively
    *     are deleted.
    *     are deleted.
    */
    */
-  public static boolean fullyDelete(File dir) throws IOException {
+  public static boolean fullyDelete(File dir) {
     if (dir.delete()) {
     if (dir.delete()) {
       // dir is (a) normal file, (b) symlink to a file, (c) empty directory or
       // dir is (a) normal file, (b) symlink to a file, (c) empty directory or
       // (d) symlink to a directory
       // (d) symlink to a directory
@@ -108,7 +107,7 @@ public class FileUtil {
    * If dir is a symlink to a directory, all the contents of the actual
    * If dir is a symlink to a directory, all the contents of the actual
    * directory pointed to by dir will be deleted.
    * directory pointed to by dir will be deleted.
    */
    */
-  public static boolean fullyDeleteContents(File dir) throws IOException {
+  public static boolean fullyDeleteContents(File dir) {
     boolean deletionSucceeded = true;
     boolean deletionSucceeded = true;
     File contents[] = dir.listFiles();
     File contents[] = dir.listFiles();
     if (contents != null) {
     if (contents != null) {
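
The signature change above (HADOOP-7552) removes a checked exception that was
never actually thrown, so call sites no longer need a try/catch. A hedged
call-site sketch (the path is illustrative, not taken from the commit):

+---+
import java.io.File;
import org.apache.hadoop.fs.FileUtil;

public class CleanupSketch {
  public static void main(String[] args) {
    // Hypothetical scratch directory; with the throws clause gone, failure is
    // reported only through the boolean return value, so check it explicitly.
    File scratch = new File("/tmp/scratch-dir");
    if (!FileUtil.fullyDelete(scratch)) {
      System.err.println("Could not fully delete " + scratch);
    }
  }
}
+---+
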

+ 123 - 26
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java

@@ -128,8 +128,26 @@ public class LocalDirAllocator {
    */
    */
   public Path getLocalPathForWrite(String pathStr, long size, 
   public Path getLocalPathForWrite(String pathStr, long size, 
       Configuration conf) throws IOException {
       Configuration conf) throws IOException {
+    return getLocalPathForWrite(pathStr, size, conf, true);
+  }
+  
+  /** Get a path from the local FS. Pass size as 
+   *  SIZE_UNKNOWN if not known apriori. We
+   *  round-robin over the set of disks (via the configured dirs) and return
+   *  the first complete path which has enough space 
+   *  @param pathStr the requested path (this will be created on the first 
+   *  available disk)
+   *  @param size the size of the file that is going to be written
+   *  @param conf the Configuration object
+   *  @param checkWrite ensure that the path is writable
+   *  @return the complete path to the file on a local disk
+   *  @throws IOException
+   */
+  public Path getLocalPathForWrite(String pathStr, long size, 
+                                   Configuration conf,
+                                   boolean checkWrite) throws IOException {
     AllocatorPerContext context = obtainContext(contextCfgItemName);
     AllocatorPerContext context = obtainContext(contextCfgItemName);
-    return context.getLocalPathForWrite(pathStr, size, conf);
+    return context.getLocalPathForWrite(pathStr, size, conf, checkWrite);
   }
   }
   
   
   /** Get a path from the local FS for reading. We search through all the
   /** Get a path from the local FS for reading. We search through all the
@@ -145,6 +163,23 @@ public class LocalDirAllocator {
     AllocatorPerContext context = obtainContext(contextCfgItemName);
     AllocatorPerContext context = obtainContext(contextCfgItemName);
     return context.getLocalPathToRead(pathStr, conf);
     return context.getLocalPathToRead(pathStr, conf);
   }
   }
+  
+  /**
+   * Get all of the paths that currently exist in the working directories.
+   * @param pathStr the path underneath the roots
+   * @param conf the configuration to look up the roots in
+   * @return all of the paths that exist under any of the roots
+   * @throws IOException
+   */
+  public Iterable<Path> getAllLocalPathsToRead(String pathStr, 
+                                               Configuration conf
+                                               ) throws IOException {
+    AllocatorPerContext context;
+    synchronized (this) {
+      context = obtainContext(contextCfgItemName);
+    }
+    return context.getAllLocalPathsToRead(pathStr, conf);    
+  }
 
 
   /** Creates a temporary file in the local FS. Pass size as -1 if not known 
   /** Creates a temporary file in the local FS. Pass size as -1 if not known 
    *  apriori. We round-robin over the set of disks (via the configured dirs) 
    *  apriori. We round-robin over the set of disks (via the configured dirs) 
@@ -214,7 +249,8 @@ public class LocalDirAllocator {
     /** This method gets called everytime before any read/write to make sure
     /** This method gets called everytime before any read/write to make sure
      * that any change to localDirs is reflected immediately.
      * that any change to localDirs is reflected immediately.
      */
      */
-    private void confChanged(Configuration conf) throws IOException {
+    private synchronized void confChanged(Configuration conf) 
+        throws IOException {
       String newLocalDirs = conf.get(contextCfgItemName);
       String newLocalDirs = conf.get(contextCfgItemName);
       if (!newLocalDirs.equals(savedLocalDirs)) {
       if (!newLocalDirs.equals(savedLocalDirs)) {
         localDirs = conf.getTrimmedStrings(contextCfgItemName);
         localDirs = conf.getTrimmedStrings(contextCfgItemName);
@@ -251,18 +287,22 @@ public class LocalDirAllocator {
       }
       }
     }
     }
 
 
-    private Path createPath(String path) throws IOException {
+    private Path createPath(String path, 
+        boolean checkWrite) throws IOException {
       Path file = new Path(new Path(localDirs[dirNumLastAccessed]),
       Path file = new Path(new Path(localDirs[dirNumLastAccessed]),
                                     path);
                                     path);
-      //check whether we are able to create a directory here. If the disk
-      //happens to be RDONLY we will fail
-      try {
-        DiskChecker.checkDir(new File(file.getParent().toUri().getPath()));
-        return file;
-      } catch (DiskErrorException d) {
-        LOG.warn("Disk Error Exception: ", d);
-        return null;
+      if (checkWrite) {
+        //check whether we are able to create a directory here. If the disk
+        //happens to be RDONLY we will fail
+        try {
+          DiskChecker.checkDir(new File(file.getParent().toUri().getPath()));
+          return file;
+        } catch (DiskErrorException d) {
+          LOG.warn("Disk Error Exception: ", d);
+          return null;
+        }
       }
       }
+      return file;
     }
     }
 
 
     /**
     /**
@@ -272,17 +312,6 @@ public class LocalDirAllocator {
     int getCurrentDirectoryIndex() {
     int getCurrentDirectoryIndex() {
       return dirNumLastAccessed;
       return dirNumLastAccessed;
     }
     }
-    
-    /** Get a path from the local FS. This method should be used if the size of 
-     *  the file is not known a priori. 
-     *  
-     *  It will use roulette selection, picking directories
-     *  with probability proportional to their available space. 
-     */
-    public synchronized Path getLocalPathForWrite(String path, 
-        Configuration conf) throws IOException {
-      return getLocalPathForWrite(path, SIZE_UNKNOWN, conf);
-    }
 
 
     /** Get a path from the local FS. If size is known, we go
     /** Get a path from the local FS. If size is known, we go
      *  round-robin over the set of disks (via the configured dirs) and return
      *  round-robin over the set of disks (via the configured dirs) and return
@@ -292,7 +321,7 @@ public class LocalDirAllocator {
      *  with probability proportional to their available space.
      *  with probability proportional to their available space.
      */
      */
     public synchronized Path getLocalPathForWrite(String pathStr, long size, 
     public synchronized Path getLocalPathForWrite(String pathStr, long size, 
-        Configuration conf) throws IOException {
+        Configuration conf, boolean checkWrite) throws IOException {
       confChanged(conf);
       confChanged(conf);
       int numDirs = localDirs.length;
       int numDirs = localDirs.length;
       int numDirsSearched = 0;
       int numDirsSearched = 0;
@@ -324,7 +353,7 @@ public class LocalDirAllocator {
             dir++;
             dir++;
           }
           }
           dirNumLastAccessed = dir;
           dirNumLastAccessed = dir;
-          returnPath = createPath(pathStr);
+          returnPath = createPath(pathStr, checkWrite);
           if (returnPath == null) {
           if (returnPath == null) {
             totalAvailable -= availableOnDisk[dir];
             totalAvailable -= availableOnDisk[dir];
             availableOnDisk[dir] = 0; // skip this disk
             availableOnDisk[dir] = 0; // skip this disk
@@ -335,7 +364,7 @@ public class LocalDirAllocator {
         while (numDirsSearched < numDirs && returnPath == null) {
         while (numDirsSearched < numDirs && returnPath == null) {
           long capacity = dirDF[dirNumLastAccessed].getAvailable();
           long capacity = dirDF[dirNumLastAccessed].getAvailable();
           if (capacity > size) {
           if (capacity > size) {
-            returnPath = createPath(pathStr);
+            returnPath = createPath(pathStr, checkWrite);
           }
           }
           dirNumLastAccessed++;
           dirNumLastAccessed++;
           dirNumLastAccessed = dirNumLastAccessed % numDirs; 
           dirNumLastAccessed = dirNumLastAccessed % numDirs; 
@@ -361,7 +390,7 @@ public class LocalDirAllocator {
         Configuration conf) throws IOException {
         Configuration conf) throws IOException {
 
 
       // find an appropriate directory
       // find an appropriate directory
-      Path path = getLocalPathForWrite(pathStr, size, conf);
+      Path path = getLocalPathForWrite(pathStr, size, conf, true);
       File dir = new File(path.getParent().toUri().getPath());
       File dir = new File(path.getParent().toUri().getPath());
       String prefix = path.getName();
       String prefix = path.getName();
 
 
@@ -398,6 +427,74 @@ public class LocalDirAllocator {
       " the configured local directories");
       " the configured local directories");
     }
     }
 
 
+    private static class PathIterator implements Iterator<Path>, Iterable<Path> {
+      private final FileSystem fs;
+      private final String pathStr;
+      private int i = 0;
+      private final String[] rootDirs;
+      private Path next = null;
+
+      private PathIterator(FileSystem fs, String pathStr, String[] rootDirs)
+          throws IOException {
+        this.fs = fs;
+        this.pathStr = pathStr;
+        this.rootDirs = rootDirs;
+        advance();
+      }
+
+      @Override
+      public boolean hasNext() {
+        return next != null;
+      }
+
+      private void advance() throws IOException {
+        while (i < rootDirs.length) {
+          next = new Path(rootDirs[i++], pathStr);
+          if (fs.exists(next)) {
+            return;
+          }
+        }
+        next = null;
+      }
+
+      @Override
+      public Path next() {
+        Path result = next;
+        try {
+          advance();
+        } catch (IOException ie) {
+          throw new RuntimeException("Can't check existance of " + next, ie);
+        }
+        return result;
+      }
+
+      @Override
+      public void remove() {
+        throw new UnsupportedOperationException("read only iterator");
+      }
+
+      @Override
+      public Iterator<Path> iterator() {
+        return this;
+      }
+    }
+
+    /**
+     * Get all of the paths that currently exist in the working directories.
+     * @param pathStr the path underneath the roots
+     * @param conf the configuration to look up the roots in
+     * @return all of the paths that exist under any of the roots
+     * @throws IOException
+     */
+    synchronized Iterable<Path> getAllLocalPathsToRead(String pathStr,
+        Configuration conf) throws IOException {
+      confChanged(conf);
+      if (pathStr.startsWith("/")) {
+        pathStr = pathStr.substring(1);
+      }
+      return new PathIterator(localFS, pathStr, localDirs);
+    }
+
     /** We search through all the configured dirs for the file's existence
     /** We search through all the configured dirs for the file's existence
      *  and return true when we find one 
      *  and return true when we find one 
      */
      */
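
The two additions above (the checkWrite flag on getLocalPathForWrite and the new
getAllLocalPathsToRead) could be exercised roughly as follows. This is a hedged
sketch: the configuration key and the relative path are invented for
illustration, and SIZE_UNKNOWN is the "size not known" sentinel referenced in
the javadoc above.

+---+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;

public class LocalDirAllocatorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "my.local.dirs" is a hypothetical config key listing comma-separated roots.
    conf.set("my.local.dirs", "/tmp/local1,/tmp/local2");
    LocalDirAllocator alloc = new LocalDirAllocator("my.local.dirs");

    // Pick a target path without creating its parent directories
    // (checkWrite == false skips the DiskChecker probe shown in the hunk).
    Path target = alloc.getLocalPathForWrite("work/output.tmp",
        LocalDirAllocator.SIZE_UNKNOWN, conf, false);
    System.out.println("would write to: " + target);

    // Enumerate every existing copy of a relative path across all roots,
    // using the new Iterable returned by getAllLocalPathsToRead.
    for (Path p : alloc.getAllLocalPathsToRead("work/output.tmp", conf)) {
      System.out.println("found: " + p);
    }
  }
}
+---+
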

+ 13 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -228,10 +228,10 @@ public class RawLocalFileSystem extends FileSystem {
   public FSDataOutputStream append(Path f, int bufferSize,
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
       Progressable progress) throws IOException {
     if (!exists(f)) {
     if (!exists(f)) {
-      throw new FileNotFoundException("File " + f + " not found.");
+      throw new FileNotFoundException("File " + f + " not found");
     }
     }
     if (getFileStatus(f).isDirectory()) {
     if (getFileStatus(f).isDirectory()) {
-      throw new IOException("Cannot append to a diretory (=" + f + " ).");
+      throw new IOException("Cannot append to a diretory (=" + f + " )");
     }
     }
     return new FSDataOutputStream(new BufferedOutputStream(
     return new FSDataOutputStream(new BufferedOutputStream(
         new LocalFSFileOutputStream(f, true), bufferSize), statistics);
         new LocalFSFileOutputStream(f, true), bufferSize), statistics);
@@ -242,7 +242,7 @@ public class RawLocalFileSystem extends FileSystem {
     short replication, long blockSize, Progressable progress)
     short replication, long blockSize, Progressable progress)
     throws IOException {
     throws IOException {
     if (exists(f) && !overwrite) {
     if (exists(f) && !overwrite) {
-      throw new IOException("File already exists:"+f);
+      throw new IOException("File already exists: "+f);
     }
     }
     Path parent = f.getParent();
     Path parent = f.getParent();
     if (parent != null && !mkdirs(parent)) {
     if (parent != null && !mkdirs(parent)) {
@@ -271,11 +271,18 @@ public class RawLocalFileSystem extends FileSystem {
     return FileUtil.copy(this, src, this, dst, true, getConf());
     return FileUtil.copy(this, src, this, dst, true, getConf());
   }
   }
   
   
+  /**
+   * Delete the given path to a file or directory.
+   * @param p the path to delete
+   * @param recursive to delete sub-directories
+   * @return true if the file or directory and all its contents were deleted
+   * @throws IOException if p is non-empty and recursive is false 
+   */
   public boolean delete(Path p, boolean recursive) throws IOException {
   public boolean delete(Path p, boolean recursive) throws IOException {
     File f = pathToFile(p);
     File f = pathToFile(p);
     if (f.isFile()) {
     if (f.isFile()) {
       return f.delete();
       return f.delete();
-    } else if ((!recursive) && f.isDirectory() && 
+    } else if (!recursive && f.isDirectory() && 
         (FileUtil.listFiles(f).length != 0)) {
         (FileUtil.listFiles(f).length != 0)) {
       throw new IOException("Directory " + f.toString() + " is not empty");
       throw new IOException("Directory " + f.toString() + " is not empty");
     }
     }
@@ -287,7 +294,7 @@ public class RawLocalFileSystem extends FileSystem {
     FileStatus[] results;
     FileStatus[] results;
 
 
     if (!localf.exists()) {
     if (!localf.exists()) {
-      throw new FileNotFoundException("File " + f + " does not exist.");
+      throw new FileNotFoundException("File " + f + " does not exist");
     }
     }
     if (localf.isFile()) {
     if (localf.isFile()) {
       return new FileStatus[] {
       return new FileStatus[] {
@@ -421,7 +428,7 @@ public class RawLocalFileSystem extends FileSystem {
     if (path.exists()) {
     if (path.exists()) {
       return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(), this);
       return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(), this);
     } else {
     } else {
-      throw new FileNotFoundException("File " + f + " does not exist.");
+      throw new FileNotFoundException("File " + f + " does not exist");
     }
     }
   }
   }
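
The new javadoc above spells out the delete contract. A hedged sketch of the
non-recursive case (the directory path is illustrative and assumed to exist and
be non-empty):

+---+
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteContractSketch {
  public static void main(String[] args) throws IOException {
    FileSystem local = FileSystem.getLocal(new Configuration());
    Path dir = new Path("/tmp/non-empty-dir");   // assumed non-empty
    try {
      local.delete(dir, false);                  // recursive == false
    } catch (IOException e) {
      // Per the documented contract, a non-empty directory with recursive=false
      // is rejected with an IOException rather than silently deleted.
      System.err.println("refused: " + e.getMessage());
    }
  }
}
+---+
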
 
 

+ 20 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -48,16 +48,12 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.ConfServlet;
 import org.apache.hadoop.conf.ConfServlet;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.http.AdminAuthorizedServlet;
-import org.apache.hadoop.http.FilterContainer;
-import org.apache.hadoop.http.FilterInitializer;
-import org.apache.hadoop.http.HtmlQuoting;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
 import org.apache.hadoop.log.LogLevel;
 import org.apache.hadoop.metrics.MetricsServlet;
 import org.apache.hadoop.metrics.MetricsServlet;
 import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
 import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector.MODE;
 import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector.MODE;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.mortbay.io.Buffer;
 import org.mortbay.io.Buffer;
@@ -79,6 +75,8 @@ import org.mortbay.jetty.webapp.WebAppContext;
 import org.mortbay.thread.QueuedThreadPool;
 import org.mortbay.thread.QueuedThreadPool;
 import org.mortbay.util.MultiException;
 import org.mortbay.util.MultiException;
 
 
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+
 /**
 /**
  * Create a Jetty embedded server to answer http requests. The primary goal
  * Create a Jetty embedded server to answer http requests. The primary goal
  * is to serve up status information for the server.
  * is to serve up status information for the server.
@@ -178,7 +176,7 @@ public class HttpServer implements FilterContainer {
 
 
     int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1);
     int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1);
     // If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the
     // If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the
-    // default value (currently 254).
+    // default value (currently 250).
     QueuedThreadPool threadPool = maxThreads == -1 ?
     QueuedThreadPool threadPool = maxThreads == -1 ?
         new QueuedThreadPool() : new QueuedThreadPool(maxThreads);
         new QueuedThreadPool() : new QueuedThreadPool(maxThreads);
     webServer.setThreadPool(threadPool);
     webServer.setThreadPool(threadPool);
@@ -325,6 +323,22 @@ public class HttpServer implements FilterContainer {
     webAppContext.setAttribute(name, value);
     webAppContext.setAttribute(name, value);
   }
   }
 
 
+  /** 
+   * Add a Jersey resource package.
+   * @param packageName The Java package name containing the Jersey resource.
+   * @param pathSpec The path spec for the servlet
+   */
+  public void addJerseyResourcePackage(final String packageName,
+      final String pathSpec) {
+    LOG.info("addJerseyResourcePackage: packageName=" + packageName
+        + ", pathSpec=" + pathSpec);
+    final ServletHolder sh = new ServletHolder(ServletContainer.class);
+    sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
+        "com.sun.jersey.api.core.PackagesResourceConfig");
+    sh.setInitParameter("com.sun.jersey.config.property.packages", packageName);
+    webAppContext.addServlet(sh, pathSpec);
+  }
+
   /**
   /**
    * Add a servlet in the server.
    * Add a servlet in the server.
    * @param name The name of the servlet (can be passed as null)
    * @param name The name of the servlet (can be passed as null)
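
A hedged sketch of how the new addJerseyResourcePackage hook might be called
when wiring a REST resource into an HttpServer instance. The constructor
arguments, package name, and path spec are illustrative assumptions, not taken
from the commit.

+---+
import org.apache.hadoop.http.HttpServer;

public class JerseySetupSketch {
  public static void main(String[] args) throws Exception {
    // (name, bindAddress, port, findPort) constructor is assumed here.
    HttpServer server = new HttpServer("rest", "0.0.0.0", 8080, true);
    // Scan a (hypothetical) package for JAX-RS annotated resources and mount
    // them under /api/*; internally this goes through Jersey's
    // PackagesResourceConfig, as the new method in the hunk above shows.
    server.addJerseyResourcePackage("org.example.rest.resources", "/api/*");
    server.start();
  }
}
+---+
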

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -285,8 +285,8 @@ public class Client {
         authMethod = AuthMethod.KERBEROS;
         authMethod = AuthMethod.KERBEROS;
       }
       }
       
       
-      header = new ConnectionHeader(protocol == null ? null : protocol
-          .getName(), ticket, authMethod);
+      header = 
+        new ConnectionHeader(RPC.getProtocolName(protocol), ticket, authMethod);
       
       
       if (LOG.isDebugEnabled())
       if (LOG.isDebugEnabled())
         LOG.debug("Use " + authMethod + " authentication for protocol "
         LOG.debug("Use " + authMethod + " authentication for protocol "

+ 38 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolInfo.java

@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+
+
+/**
+ * The protocol name that is used when a client and server connect.
+ * By default the class name of the protocol interface is the protocol name.
+ * 
+ * Why override the default name (i.e. the class name)?
+ * One use case overriding the default name (i.e. the class name) is when
+ * there are multiple implementations of the same protocol, each with say a
+ *  different version/serialization.
+ * In Hadoop this is used to allow multiple server and client adapters
+ * for different versions of the same protocol service.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+public @interface ProtocolInfo {
+  String protocolName();  // the name of the protocol (i.e. rpc service)
+}
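
The new annotation is applied to a protocol interface to override its wire
name. A hedged sketch (the interface and the protocol name are made up):

+---+
import java.io.IOException;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;

// Hypothetical protocol: without the annotation its wire name would be the
// fully qualified class name; with it, several client/server adapters for
// different versions or serializations can share one protocol name.
@ProtocolInfo(protocolName = "org.example.ClientProtocol")
public interface ExampleClientProtocol extends VersionedProtocol {
  long getStatus(String resource) throws IOException;
}
+---+
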

+ 30 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java

@@ -62,6 +62,20 @@ import org.apache.hadoop.util.ReflectionUtils;
  */
  */
 public class RPC {
 public class RPC {
   static final Log LOG = LogFactory.getLog(RPC.class);
   static final Log LOG = LogFactory.getLog(RPC.class);
+  
+  
+  /**
+   * Get the protocol name.
+   *  If the protocol class has a ProtocolAnnotation, then get the protocol
+   *  name from the annotation; otherwise the class name is the protocol name.
+   */
+  static public String getProtocolName(Class<?> protocol) {
+    if (protocol == null) {
+      return null;
+    }
+    ProtocolInfo anno = (ProtocolInfo) protocol.getAnnotation(ProtocolInfo.class);
+    return  (anno == null) ? protocol.getName() : anno.protocolName();
+  }
 
 
   private RPC() {}                                  // no public ctor
   private RPC() {}                                  // no public ctor
 
 
@@ -553,8 +567,10 @@ public class RPC {
   }
   }
 
 
   /** Construct a server for a protocol implementation instance. */
   /** Construct a server for a protocol implementation instance. */
-  public static Server getServer(Class<?> protocol,
-                                 Object instance, String bindAddress, int port,
+
+  public static <PROTO extends VersionedProtocol, IMPL extends PROTO> 
+        Server getServer(Class<PROTO> protocol,
+                                 IMPL instance, String bindAddress, int port,
                                  int numHandlers, int numReaders, int queueSizePerHandler,
                                  int numHandlers, int numReaders, int queueSizePerHandler,
                                  boolean verbose, Configuration conf,
                                  boolean verbose, Configuration conf,
                                  SecretManager<? extends TokenIdentifier> secretManager) 
                                  SecretManager<? extends TokenIdentifier> secretManager) 
@@ -576,6 +592,18 @@ public class RPC {
       super(bindAddress, port, paramClass, handlerCount, numReaders, queueSizePerHandler,
       super(bindAddress, port, paramClass, handlerCount, numReaders, queueSizePerHandler,
             conf, serverName, secretManager);
             conf, serverName, secretManager);
     }
     }
+    
+    /**
+     * Add a protocol to the existing server.
+     * @param protocolClass - the protocol class
+     * @param protocolImpl - the impl of the protocol that will be called
+     * @return the server (for convenience)
+     */
+    public <PROTO extends VersionedProtocol, IMPL extends PROTO>
+      Server addProtocol(Class<PROTO> protocolClass, IMPL protocolImpl
+    ) throws IOException {
+      throw new IOException("addProtocol Not Implemented");
+    }
   }
   }
 
 
 }
 }
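
Given an annotation like the one introduced above, the getProtocolName helper
added to RPC resolves wire names as sketched below; the nested interface is a
hypothetical stand-in.

+---+
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.RPC;

public class ProtocolNameSketch {
  // Local, hypothetical protocol interface with an overridden wire name.
  @ProtocolInfo(protocolName = "org.example.PingProtocol")
  interface PingProtocol {
    boolean ping();
  }

  public static void main(String[] args) {
    // Annotated interface: the @ProtocolInfo value wins.
    System.out.println(RPC.getProtocolName(PingProtocol.class)); // org.example.PingProtocol
    // Unannotated class: falls back to Class#getName().
    System.out.println(RPC.getProtocolName(String.class));       // java.lang.String
  }
}
+---+
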

+ 7 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -900,7 +900,7 @@ public abstract class Server {
     private InetAddress addr;
     private InetAddress addr;
     
     
     ConnectionHeader header = new ConnectionHeader();
     ConnectionHeader header = new ConnectionHeader();
-    Class<?> protocol;
+    String protocolName;
     boolean useSasl;
     boolean useSasl;
     SaslServer saslServer;
     SaslServer saslServer;
     private AuthMethod authMethod;
     private AuthMethod authMethod;
@@ -1287,15 +1287,8 @@ public abstract class Server {
       DataInputStream in =
       DataInputStream in =
         new DataInputStream(new ByteArrayInputStream(buf));
         new DataInputStream(new ByteArrayInputStream(buf));
       header.readFields(in);
       header.readFields(in);
-      try {
-        String protocolClassName = header.getProtocol();
-        if (protocolClassName != null) {
-          protocol = getProtocolClass(header.getProtocol(), conf);
-          rpcDetailedMetrics.init(protocol);
-        }
-      } catch (ClassNotFoundException cnfe) {
-        throw new IOException("Unknown protocol: " + header.getProtocol());
-      }
+      protocolName = header.getProtocol();
+
       
       
       UserGroupInformation protocolUser = header.getUgi();
       UserGroupInformation protocolUser = header.getUgi();
       if (!useSasl) {
       if (!useSasl) {
@@ -1484,7 +1477,7 @@ public abstract class Server {
             // Make the call as the user via Subject.doAs, thus associating
             // Make the call as the user via Subject.doAs, thus associating
             // the call with the Subject
             // the call with the Subject
             if (call.connection.user == null) {
             if (call.connection.user == null) {
-              value = call(call.connection.protocol, call.param, 
+              value = call(call.connection.protocolName, call.param, 
                            call.timestamp);
                            call.timestamp);
             } else {
             } else {
               value = 
               value = 
@@ -1493,7 +1486,7 @@ public abstract class Server {
                      @Override
                      @Override
                      public Writable run() throws Exception {
                      public Writable run() throws Exception {
                        // make the call
                        // make the call
-                       return call(call.connection.protocol, 
+                       return call(call.connection.protocolName, 
                                    call.param, call.timestamp);
                                    call.param, call.timestamp);
 
 
                      }
                      }
@@ -1753,7 +1746,7 @@ public abstract class Server {
   
   
   /** 
   /** 
    * Called for each call. 
    * Called for each call. 
-   * @deprecated Use {@link #call(Class, Writable, long)} instead
+   * @deprecated Use {@link #call(String, Writable, long)} instead
    */
    */
   @Deprecated
   @Deprecated
   public Writable call(Writable param, long receiveTime) throws IOException {
   public Writable call(Writable param, long receiveTime) throws IOException {
@@ -1761,7 +1754,7 @@ public abstract class Server {
   }
   }
   
   
   /** Called for each call. */
   /** Called for each call. */
-  public abstract Writable call(Class<?> protocol,
+  public abstract Writable call(String protocol,
                                Writable param, long receiveTime)
                                Writable param, long receiveTime)
   throws IOException;
   throws IOException;
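
With the abstract call method now taking the protocol name as a String (the
value carried in the connection header), a concrete server dispatches on that
name rather than on a resolved Class. The sketch below is hedged: the
(bindAddress, port, paramClass, handlerCount, conf) constructor and the echo
protocol name are assumptions for illustration.

+---+
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.ObjectWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.Server;

// Dispatch is keyed by the protocol *name* string from the connection header,
// so an unknown protocol surfaces here instead of as a ClassNotFoundException
// while reading the header.
public class EchoServerSketch extends Server {

  public EchoServerSketch(String bindAddress, int port, Configuration conf)
      throws IOException {
    super(bindAddress, port, ObjectWritable.class, 1, conf);  // assumed ctor
  }

  @Override
  public Writable call(String protocolName, Writable param, long receiveTime)
      throws IOException {
    if (!"org.example.EchoProtocol".equals(protocolName)) {   // illustrative name
      throw new IOException("Unknown protocol: " + protocolName);
    }
    return param;   // echo the request back
  }
}
+---+
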
   
   

+ 0 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/VersionedProtocol.java

@@ -34,7 +34,6 @@ public interface VersionedProtocol {
    * @return the version that the server will speak
    * @return the version that the server will speak
    * @throws IOException if any IO error occurs
    * @throws IOException if any IO error occurs
    */
    */
-  @Deprecated
   public long getProtocolVersion(String protocol,
   public long getProtocolVersion(String protocol,
                                  long clientVersion) throws IOException;
                                  long clientVersion) throws IOException;
 
 

+ 312 - 37
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java

@@ -27,6 +27,9 @@ import java.lang.reflect.InvocationTargetException;
 
 
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 import java.io.*;
 import java.io.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
 import java.util.Map;
 import java.util.Map;
 import java.util.HashMap;
 import java.util.HashMap;
 
 
@@ -35,6 +38,7 @@ import javax.net.SocketFactory;
 import org.apache.commons.logging.*;
 import org.apache.commons.logging.*;
 
 
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.io.*;
+import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -47,10 +51,46 @@ import org.apache.hadoop.conf.*;
 public class WritableRpcEngine implements RpcEngine {
 public class WritableRpcEngine implements RpcEngine {
   private static final Log LOG = LogFactory.getLog(RPC.class);
   private static final Log LOG = LogFactory.getLog(RPC.class);
   
   
+ 
+  /**
+   * Get all superInterfaces that extend VersionedProtocol
+   * @param childInterfaces
+   * @return the super interfaces that extend VersionedProtocol
+   */
+  private static Class<?>[] getSuperInterfaces(Class<?>[] childInterfaces) {
+    List<Class<?>> allInterfaces = new ArrayList<Class<?>>();
+
+    for (Class<?> childInterface : childInterfaces) {
+      if (VersionedProtocol.class.isAssignableFrom(childInterface)) {
+          allInterfaces.add(childInterface);
+          allInterfaces.addAll(
+              Arrays.asList(
+                  getSuperInterfaces(childInterface.getInterfaces())));
+      } else {
+        LOG.warn("Interface " + childInterface +
+              " ignored because it does not extend VersionedProtocol");
+      }
+    }
+    return (Class<?>[]) allInterfaces.toArray(new Class[allInterfaces.size()]);
+  }
+  
+  /**
+   * Get all interfaces that the given protocol implements or extends
+   * which are assignable from VersionedProtocol.
+   */
+  private static Class<?>[] getProtocolInterfaces(Class<?> protocol) {
+    Class<?>[] interfaces  = protocol.getInterfaces();
+    return getSuperInterfaces(interfaces);
+  }
+
+  
   //writableRpcVersion should be updated if there is a change
   //writableRpcVersion should be updated if there is a change
   //in format of the rpc messages.
   //in format of the rpc messages.
-  public static long writableRpcVersion = 1L;
+  
+  // 2L - added declared class to Invocation
+  public static final long writableRpcVersion = 2L; 
 
 
+  
   /** A method invocation, including the method name and its parameters.*/
   /** A method invocation, including the method name and its parameters.*/
   private static class Invocation implements Writable, Configurable {
   private static class Invocation implements Writable, Configurable {
     private String methodName;
     private String methodName;
@@ -59,11 +99,13 @@ public class WritableRpcEngine implements RpcEngine {
     private Configuration conf;
     private long clientVersion;
     private int clientMethodsHash;
+    private String declaringClassProtocolName;
     
     //This could be different from static writableRpcVersion when received
     //at server, if client is using a different version.
     private long rpcVersion;

+    @SuppressWarnings("unused") // called when deserializing an invocation
     public Invocation() {}

     public Invocation(Method method, Object[] parameters) {
@@ -88,6 +130,8 @@ public class WritableRpcEngine implements RpcEngine {
         this.clientMethodsHash = ProtocolSignature.getFingerprint(method
             .getDeclaringClass().getMethods());
       }
+      this.declaringClassProtocolName = 
+          RPC.getProtocolName(method.getDeclaringClass());
     }

     /** The name of the method invoked. */
@@ -103,6 +147,7 @@ public class WritableRpcEngine implements RpcEngine {
       return clientVersion;
     }

+    @SuppressWarnings("unused")
     private int getClientMethodsHash() {
       return clientMethodsHash;
     }
@@ -115,8 +160,10 @@ public class WritableRpcEngine implements RpcEngine {
       return rpcVersion;
     }

+    @SuppressWarnings("deprecation")
     public void readFields(DataInput in) throws IOException {
       rpcVersion = in.readLong();
+      declaringClassProtocolName = UTF8.readString(in);
       methodName = UTF8.readString(in);
       clientVersion = in.readLong();
       clientMethodsHash = in.readInt();
@@ -124,13 +171,16 @@ public class WritableRpcEngine implements RpcEngine {
       parameterClasses = new Class[parameters.length];
       ObjectWritable objectWritable = new ObjectWritable();
       for (int i = 0; i < parameters.length; i++) {
-        parameters[i] = ObjectWritable.readObject(in, objectWritable, this.conf);
+        parameters[i] = 
+            ObjectWritable.readObject(in, objectWritable, this.conf);
         parameterClasses[i] = objectWritable.getDeclaredClass();
       }
     }

+    @SuppressWarnings("deprecation")
     public void write(DataOutput out) throws IOException {
       out.writeLong(rpcVersion);
+      UTF8.writeString(out, declaringClassProtocolName);
       UTF8.writeString(out, methodName);
       out.writeLong(clientVersion);
       out.writeInt(clientMethodsHash);
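Taken together, the readFields/write changes define the version-2 wire layout of an Invocation header. A rough, hedged sketch of the field order using plain java.io (writeUTF stands in for Hadoop's UTF8.writeString, and the protocol and method names are made up):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class InvocationHeaderSketch {
      public static void main(String[] args) throws IOException {
        DataOutputStream out = new DataOutputStream(new ByteArrayOutputStream());
        out.writeLong(2L);                        // rpcVersion (writableRpcVersion)
        out.writeUTF("org.example.FooProtocol");  // declaringClassProtocolName, new in 2L
        out.writeUTF("echo");                     // methodName
        out.writeLong(1L);                        // clientVersion
        out.writeInt(0);                          // clientMethodsHash
        // ...followed by the parameter count and ObjectWritable-encoded parameters
        out.close();
      }
    }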
@@ -273,30 +323,161 @@ public class WritableRpcEngine implements RpcEngine {
 
 
   /** Construct a server for a protocol implementation instance listening on a
    * port and address. */
-  public Server getServer(Class<?> protocol,
-                          Object instance, String bindAddress, int port,
-                          int numHandlers, int numReaders, int queueSizePerHandler,
-                          boolean verbose, Configuration conf,
+  public RPC.Server getServer(Class<?> protocolClass,
+                      Object protocolImpl, String bindAddress, int port,
+                      int numHandlers, int numReaders, int queueSizePerHandler,
+                      boolean verbose, Configuration conf,
                       SecretManager<? extends TokenIdentifier> secretManager) 
     throws IOException {
-    return new Server(instance, conf, bindAddress, port, numHandlers, 
-        numReaders, queueSizePerHandler, verbose, secretManager);
+    return new Server(protocolClass, protocolImpl, conf, bindAddress, port,
+        numHandlers, numReaders, queueSizePerHandler, verbose, secretManager);
   }

+
   /** An RPC Server. */
   public static class Server extends RPC.Server {
-    private Object instance;
     private boolean verbose;
+    
+    /**
+     * The (protocol name, version) key used in the protocol implementation map.
+     */
+    static class ProtoNameVer {
+      final String protocol;
+      final long   version;
+      ProtoNameVer(String protocol, long ver) {
+        this.protocol = protocol;
+        this.version = ver;
+      }
+      @Override
+      public boolean equals(Object o) {
+        if (o == null) 
+          return false;
+        if (this == o) 
+          return true;
+        if (! (o instanceof ProtoNameVer))
+          return false;
+        ProtoNameVer pv = (ProtoNameVer) o;
+        return ((pv.protocol.equals(this.protocol)) && 
+            (pv.version == this.version));     
+      }
+      @Override
+      public int hashCode() {
+        return protocol.hashCode() * 37 + (int) version;    
+      }
+    }
+    
+    /**
+     * The value in the protocol implementation map: the protocol class and its implementation.
+     */
+    static class ProtoClassProtoImpl {
+      final Class<?> protocolClass;
+      final Object protocolImpl; 
+      ProtoClassProtoImpl(Class<?> protocolClass, Object protocolImpl) {
+        this.protocolClass = protocolClass;
+        this.protocolImpl = protocolImpl;
+      }
+    }
+    
+    private Map<ProtoNameVer, ProtoClassProtoImpl> protocolImplMap = 
+        new HashMap<ProtoNameVer, ProtoClassProtoImpl>(10);
+    
+    // Register  protocol and its impl for rpc calls
+    private void registerProtocolAndImpl(Class<?> protocolClass, 
+        Object protocolImpl) throws IOException {
+      String protocolName = RPC.getProtocolName(protocolClass);
+      VersionedProtocol vp = (VersionedProtocol) protocolImpl;
+      long version;
+      try {
+        version = vp.getProtocolVersion(protocolName, 0);
+      } catch (Exception ex) {
+        LOG.warn("Protocol " + protocolClass +
+            " NOT registered because getProtocolVersion threw an exception");
+        return;
+      }
+      protocolImplMap.put(new ProtoNameVer(protocolName, version),
+          new ProtoClassProtoImpl(protocolClass, protocolImpl)); 
+      LOG.info("ProtocolImpl=" + protocolImpl.getClass().getName() + 
+          " protocolClass=" + protocolClass.getName() + " version=" + version);
+    }
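Because the map key combines the protocol name with the version reported by each implementation's getProtocolVersion, one server can hold several versions of the same protocol at once. Conceptually (the names and version numbers below are illustrative only):

    // protocolImplMap after registering two impls of the same protocol:
    //   ("org.example.FooProtocol", 1L) -> (FooProtocol.class, fooImplV1)
    //   ("org.example.FooProtocol", 2L) -> (FooProtocol.class, fooImplV2)
    // where each version is whatever that impl returned from getProtocolVersion().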
+    
+    private static class VerProtocolImpl {
+      final long version;
+      final ProtoClassProtoImpl protocolTarget;
+      VerProtocolImpl(long ver, ProtoClassProtoImpl protocolTarget) {
+        this.version = ver;
+        this.protocolTarget = protocolTarget;
+      }
+    }
+    
+    
+    @SuppressWarnings("unused") // will be useful later.
+    private VerProtocolImpl[] getSupportedProtocolVersions(
+        String protocolName) {
+      VerProtocolImpl[] resultk = new  VerProtocolImpl[protocolImplMap.size()];
+      int i = 0;
+      for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv :
+                                        protocolImplMap.entrySet()) {
+        if (pv.getKey().protocol.equals(protocolName)) {
+          resultk[i++] = 
+              new VerProtocolImpl(pv.getKey().version, pv.getValue());
+        }
+      }
+      if (i == 0) {
+        return null;
+      }
+      VerProtocolImpl[] result = new VerProtocolImpl[i];
+      System.arraycopy(resultk, 0, result, 0, i);
+      return result;
+    }
+    
+    private VerProtocolImpl getHighestSupportedProtocol(String protocolName) {    
+      Long highestVersion = 0L;
+      ProtoClassProtoImpl highest = null;
+      for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv : protocolImplMap
+          .entrySet()) {
+        if (pv.getKey().protocol.equals(protocolName)) {
+          if ((highest == null) || (pv.getKey().version > highestVersion)) {
+            highest = pv.getValue();
+            highestVersion = pv.getKey().version;
+          } 
+        }
+      }
+      if (highest == null) {
+        return null;
+      }
+      return new VerProtocolImpl(highestVersion,  highest);   
+    }
+ 
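getHighestSupportedProtocol backs the dispatch rules in call() further down. For a protocol registered at more than one version the behaviour works out roughly as follows (the protocol name and versions are illustrative):

    // Assume "org.example.FooProtocol" is registered at versions 1 and 2.
    //
    //  * VersionedProtocol methods (getProtocolVersion, getProtocolSignature)
    //    are routed to the highest registered version, here 2.
    //  * A client built against version 1 or 2 gets an exact
    //    (name, version) match from protocolImplMap.
    //  * A client asking for version 3 gets RPC.VersionMismatch carrying the
    //    highest supported version (2).
    //  * A protocol name that was never registered gets
    //    IOException("Unknown protocol: ...").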
 
 
     /** Construct an RPC server.
+     * @param instance the instance whose methods will be called
+     * @param conf the configuration to use
+     * @param bindAddress the address to bind on to listen for connection
+     * @param port the port to listen for connections on
+     * 
+     * @deprecated Use #Server(Class, Object, Configuration, String, int)
+     *    
+     */
+    @Deprecated
+    public Server(Object instance, Configuration conf, String bindAddress,
+        int port) 
+      throws IOException {
+      this(null, instance, conf,  bindAddress, port);
+    }
+    
+    
+    /** Construct an RPC server.
+     * @param protocolClass the protocol class
      * @param instance the instance whose methods will be called
      * @param conf the configuration to use
      * @param bindAddress the address to bind on to listen for connection
      * @param port the port to listen for connections on
      */
-    public Server(Object instance, Configuration conf, String bindAddress, int port) 
+    public Server(Class<?> protocolClass, Object protocolImpl, 
+        Configuration conf, String bindAddress, int port) 
       throws IOException {
-      this(instance, conf,  bindAddress, port, 1, -1, -1, false, null);
+      this(protocolClass, protocolImpl, conf,  bindAddress, port, 1, -1, -1,
+          false, null);
     }
     
     private static String classNameBase(String className) {
@@ -307,35 +488,103 @@ public class WritableRpcEngine implements RpcEngine {
       return names[names.length-1];
     }
     
+    
     /** Construct an RPC server.
-     * @param instance the instance whose methods will be called
+     * @param protocolImpl the instance whose methods will be called
      * @param conf the configuration to use
      * @param bindAddress the address to bind on to listen for connection
      * @param port the port to listen for connections on
      * @param numHandlers the number of method handler threads to run
      * @param verbose whether each call should be logged
+     * 
+     * @deprecated use Server#Server(Class, Object, 
+     *      Configuration, String, int, int, int, int, boolean, SecretManager)
      */
-    public Server(Object instance, Configuration conf, String bindAddress,  int port,
-                  int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose, 
-                  SecretManager<? extends TokenIdentifier> secretManager) 
+    @Deprecated
+    public Server(Object protocolImpl, Configuration conf, String bindAddress,
+        int port, int numHandlers, int numReaders, int queueSizePerHandler,
+        boolean verbose, SecretManager<? extends TokenIdentifier> secretManager) 
+            throws IOException {
+       this(null, protocolImpl,  conf,  bindAddress,   port,
+                   numHandlers,  numReaders,  queueSizePerHandler,  verbose, 
+                   secretManager);
+   
+    }
+    
+    /** Construct an RPC server.
+     * @param protocolClass the protocol being registered; may be null for
+     *     compatibility with old usage (see below for details)
+     * @param protocolImpl the protocol impl that will be called
+     * @param conf the configuration to use
+     * @param bindAddress the address to bind on to listen for connection
+     * @param port the port to listen for connections on
+     * @param numHandlers the number of method handler threads to run
+     * @param verbose whether each call should be logged
+     */
+    public Server(Class<?> protocolClass, Object protocolImpl,
+        Configuration conf, String bindAddress,  int port,
+        int numHandlers, int numReaders, int queueSizePerHandler, 
+        boolean verbose, SecretManager<? extends TokenIdentifier> secretManager) 
         throws IOException {
       super(bindAddress, port, Invocation.class, numHandlers, numReaders,
           queueSizePerHandler, conf,
-          classNameBase(instance.getClass().getName()), secretManager);
-      this.instance = instance;
+          classNameBase(protocolImpl.getClass().getName()), secretManager);
+
       this.verbose = verbose;
+      
+      
+      Class<?>[] protocols;
+      if (protocolClass == null) { // derive protocol from impl
+        /*
+         * In order to remain compatible with the old usage, where a single
+         * target protocolImpl is supplied for all the protocol interfaces it
+         * implements, we derive the protocols from the impl and register
+         * every VersionedProtocol interface it extends.
+         */
+        protocols = getProtocolInterfaces(protocolImpl.getClass());
+
+      } else {
+        if (!protocolClass.isAssignableFrom(protocolImpl.getClass())) {
+          throw new IOException("protocolClass "+ protocolClass +
+              " is not implemented by protocolImpl which is of class " +
+              protocolImpl.getClass());
+        }
+        // register protocol class and its super interfaces
+        registerProtocolAndImpl(protocolClass, protocolImpl);
+        protocols = getProtocolInterfaces(protocolClass);
+      }
+      for (Class<?> p : protocols) {
+        if (!p.equals(VersionedProtocol.class)) {
+          registerProtocolAndImpl(p, protocolImpl);
+        }
+      }
+
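A fragmentary usage sketch of the two constructor modes (FooProtocol, fooImpl and conf are placeholders, not part of this patch): an explicit protocol class registers that class plus its VersionedProtocol superinterfaces, while a null protocol class keeps the old behaviour of deriving the protocols from the implementation object itself.

    // Explicit protocol class: FooProtocol and its VersionedProtocol
    // superinterfaces are registered against fooImpl.
    RPC.Server server =
        new WritableRpcEngine.Server(FooProtocol.class, fooImpl, conf,
            "0.0.0.0", 0, 1, -1, -1, false, null);

    // Legacy form: protocolClass == null, so the protocols are derived from
    // fooImpl's own interfaces, as pre-existing callers expect.
    RPC.Server legacy =
        new WritableRpcEngine.Server(null, fooImpl, conf, "0.0.0.0", 0);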
     }

-    public Writable call(Class<?> protocol, Writable param, long receivedTime) 
+ 
+    @Override
+    public <PROTO extends VersionedProtocol, IMPL extends PROTO> Server
+      addProtocol(
+        Class<PROTO> protocolClass, IMPL protocolImpl) throws IOException {
+      registerProtocolAndImpl(protocolClass, protocolImpl);
+      return this;
+    }
+    
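addProtocol lets more protocol/impl pairs be attached after construction, so one listener can serve several VersionedProtocol interfaces; it returns the server, so registrations can be chained. A one-line sketch (BarProtocol, BazProtocol and their impls are hypothetical):

    server.addProtocol(BarProtocol.class, barImpl)
          .addProtocol(BazProtocol.class, bazImpl);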
+    /**
+     * Process a client call.
+     * @param protocolName the protocol name (the class of the client proxy
+     *      used to make calls to the RPC server)
+     * @param param the parameters of the call
+     * @param receivedTime the time at which the call was received (for metrics)
+     * @return the call's return value
+     * @throws IOException
+     */
+    public Writable call(String protocolName, Writable param, long receivedTime) 
     throws IOException {
       try {
         Invocation call = (Invocation)param;
         if (verbose) log("Call: " + call);

-        Method method = protocol.getMethod(call.getMethodName(),
-                                           call.getParameterClasses());
-        method.setAccessible(true);
-
         // Verify rpc version
         if (call.getRpcVersion() != writableRpcVersion) {
           // Client is using a different version of WritableRpc
@@ -344,25 +593,51 @@ public class WritableRpcEngine implements RpcEngine {
                   + call.getRpcVersion() + ", server side version="
                   + writableRpcVersion);
         }
-        
-        //Verify protocol version.
-        //Bypass the version check for VersionedProtocol
-        if (!method.getDeclaringClass().equals(VersionedProtocol.class)) {
-          long clientVersion = call.getProtocolVersion();
-          ProtocolSignature serverInfo = ((VersionedProtocol) instance)
-              .getProtocolSignature(protocol.getCanonicalName(), call
-                  .getProtocolVersion(), call.getClientMethodsHash());
-          long serverVersion = serverInfo.getVersion();
-          if (serverVersion != clientVersion) {
-            LOG.warn("Version mismatch: client version=" + clientVersion
-                + ", server version=" + serverVersion);
-            throw new RPC.VersionMismatch(protocol.getName(), clientVersion,
-                serverVersion);
+
+        long clientVersion = call.getProtocolVersion();
+        final String protoName;
+        ProtoClassProtoImpl protocolImpl;
+        if (call.declaringClassProtocolName.equals(VersionedProtocol.class.getName())) {
+          // VersionedProtocol methods are often used by the client to figure
+          // out which version of the protocol to use.
+          //
+          // Versioned protocol methods should go to the protocolName protocol
+          // rather than to the declaring class of the method, since the
+          // declaring class is VersionedProtocol, which is not
+          // registered directly.
+          // Send the call to the highest registered protocol version.
+          protocolImpl = 
+              getHighestSupportedProtocol(protocolName).protocolTarget;
+        } else {
+          protoName = call.declaringClassProtocolName;
+
+          // Find the right impl for the protocol based on client version.
+          ProtoNameVer pv = 
+              new ProtoNameVer(call.declaringClassProtocolName, clientVersion);
+          protocolImpl = protocolImplMap.get(pv);
+          if (protocolImpl == null) { // no match for Protocol AND Version
+             VerProtocolImpl highest = 
+                 getHighestSupportedProtocol(protoName);
+            if (highest == null) {
+              throw new IOException("Unknown protocol: " + protoName);
+            } else { // protocol supported but not the version that client wants
+              throw new RPC.VersionMismatch(protoName, clientVersion,
+                highest.version);
+            }
           }
         }
+        
+
+        // Invoke the protocol method
 
 
         long startTime = System.currentTimeMillis();
         long startTime = System.currentTimeMillis();
-        Object value = method.invoke(instance, call.getParameters());
+        Method method = 
+            protocolImpl.protocolClass.getMethod(call.getMethodName(),
+            call.getParameterClasses());
+        method.setAccessible(true);
+        rpcDetailedMetrics.init(protocolImpl.protocolClass);
+        Object value = 
+            method.invoke(protocolImpl.protocolImpl, call.getParameters());
         int processingTime = (int) (System.currentTimeMillis() - startTime);
         int qTime = (int) (startTime-receivedTime);
         if (LOG.isDebugEnabled()) {

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java

@@ -132,6 +132,12 @@ public class GangliaContext extends AbstractMetricsContext {
     StringBuilder sb = new StringBuilder();
     sb.append(contextName);
     sb.append('.');
+
+    if (contextName.equals("jvm") && outRec.getTag("processName") != null) {
+      sb.append(outRec.getTag("processName"));
+      sb.append('.');
+    }
+
     sb.append(recordName);
     sb.append('.');
     int sbBaseLen = sb.length();
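The effect of the new branch is that jvm metrics are qualified by the emitting daemon, so NameNode and DataNode JVM metrics no longer collapse into a single Ganglia name. Roughly (the metric and tag values are illustrative):

    // before:                        after, with processName=NameNode:
    // jvm.metrics.memHeapUsedM       jvm.NameNode.metrics.memHeapUsedM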

+ 69 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java

@@ -20,13 +20,21 @@ package org.apache.hadoop.metrics2.sink.ganglia;
 
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
 import java.util.Map;
+import java.util.Set;

+import org.apache.commons.configuration.SubsetConfiguration;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.impl.MsInfo;
 import org.apache.hadoop.metrics2.util.MetricsCache;
 import org.apache.hadoop.metrics2.util.MetricsCache.Record;

@@ -38,8 +46,67 @@ public class GangliaSink30 extends AbstractGangliaSink {
 
 
   public final Log LOG = LogFactory.getLog(this.getClass());

+  private static final String TAGS_FOR_PREFIX_PROPERTY_PREFIX = "tagsForPrefix.";
+  
   private MetricsCache metricsCache = new MetricsCache();

+  // a key with a NULL value means ALL
+  private Map<String,Set<String>> useTagsMap = new HashMap<String,Set<String>>();
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void init(SubsetConfiguration conf) {
+    super.init(conf);
+
+    conf.setListDelimiter(',');
+    Iterator<String> it = (Iterator<String>) conf.getKeys();
+    while (it.hasNext()) {
+      String propertyName = it.next();
+      if (propertyName.startsWith(TAGS_FOR_PREFIX_PROPERTY_PREFIX)) {
+        String contextName = propertyName.substring(TAGS_FOR_PREFIX_PROPERTY_PREFIX.length());
+        String[] tags = conf.getStringArray(propertyName);
+        boolean useAllTags = false;
+        Set<String> set = null;
+        if (tags.length > 0) {
+          set = new HashSet<String>();
+          for (String tag : tags) {
+            tag = tag.trim();
+            useAllTags |= tag.equals("*");
+            if (tag.length() > 0) {
+              set.add(tag);
+            }
+          }
+          if (useAllTags) {
+            set = null;
+          }
+        }
+        useTagsMap.put(contextName, set);
+      }
+    }
+  }
+
+  @InterfaceAudience.Private
+  public void appendPrefix(MetricsRecord record, StringBuilder sb) {
+    String contextName = record.context();
+    Collection<MetricsTag> tags = record.tags();
+    if (useTagsMap.containsKey(contextName)) {
+      Set<String> useTags = useTagsMap.get(contextName);
+      for (MetricsTag t : tags) {
+        if (useTags == null || useTags.contains(t.name())) {
+
+          // the context is always skipped here because it is always added
+          
+          // the hostname is always skipped to avoid case-mismatches 
+          // from different DNSes.
+
+          if (t.info() != MsInfo.Context && t.info() != MsInfo.Hostname
+              && t.value() != null) {
+            sb.append('.').append(t.name()).append('=').append(t.value());
+          }
+        }
+      }
+    }          
+  }
+  
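init() collects sink properties of the form tagsForPrefix.<context>, and appendPrefix then appends each selected tag to the group name as .<tagName>=<tagValue> (Context and Hostname tags are always skipped). Assuming the usual hadoop-metrics2.properties naming for a sink instance called ganglia (the instance name and tag choices below are examples, not mandated by this patch), the configuration could look like:

    // hadoop-metrics2.properties (assumed sink instance name "ganglia"):
    //   *.sink.ganglia.tagsForPrefix.jvm=ProcessName
    //   *.sink.ganglia.tagsForPrefix.dfs=*
    //
    // The first line folds the ProcessName tag into jvm group names; the
    // "*" form keeps every tag of the dfs context.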
   @Override
   public void putMetrics(MetricsRecord record) {
     // The method handles both cases whether Ganglia support dense publish
@@ -53,6 +120,8 @@ public class GangliaSink30 extends AbstractGangliaSink {
       sb.append('.');
       sb.append(recordName);

+      appendPrefix(record, sb);
+      
       String groupName = sb.toString();
       sb.append('.');
       int sbBaseLen = sb.length();

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java

@@ -17,7 +17,7 @@
  */
  */
 package org.apache.hadoop.security;
 package org.apache.hadoop.security;
 
 
-import org.apache.hadoop.alfredo.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;
 import org.apache.hadoop.http.FilterInitializer;

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java

@@ -23,7 +23,7 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.alfredo.util.KerberosName;
+import org.apache.hadoop.security.authentication.util.KerberosName;
 
 
 import sun.security.krb5.Config;
 import sun.security.krb5.Config;
 import sun.security.krb5.KrbException;
 import sun.security.krb5.KrbException;

+ 1 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java

@@ -158,10 +158,7 @@ public class RunJar {
 
 
     Runtime.getRuntime().addShutdownHook(new Thread() {
     Runtime.getRuntime().addShutdownHook(new Thread() {
         public void run() {
         public void run() {
-          try {
-            FileUtil.fullyDelete(workDir);
-          } catch (IOException e) {
-          }
+          FileUtil.fullyDelete(workDir);
         }
         }
       });
       });
 
 

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -317,6 +317,11 @@
   <value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
   <value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
 </property>
 </property>
 
 
+<property>
+  <name>fs.webhdfs.impl</name>
+  <value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value>
+</property>
+
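With fs.webhdfs.impl registered in core-default.xml, a webhdfs:// URI resolves through the normal FileSystem factory. A minimal, hedged usage sketch (the NameNode host and HTTP port are placeholders):

    Configuration conf = new Configuration();   // picks up core-default.xml
    FileSystem webhdfs =
        FileSystem.get(URI.create("webhdfs://namenode.example.com:50070/"), conf);
    FileStatus[] listing = webhdfs.listStatus(new Path("/"));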
 <property>
 <property>
   <name>fs.ftp.impl</name>
   <name>fs.ftp.impl</name>
   <value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>
   <value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>

+ 5 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java

@@ -32,6 +32,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.Test;
+import org.mortbay.log.Log;
 
 
 import static org.apache.hadoop.fs.FileSystemTestHelper.*;
 import static org.apache.hadoop.fs.FileSystemTestHelper.*;
 
 
@@ -62,8 +63,6 @@ public abstract class FSMainOperationsBaseTest  {
   private static String TEST_DIR_AXX = "test/hadoop/axx";
   private static String TEST_DIR_AXX = "test/hadoop/axx";
   private static int numBlocks = 2;
   private static int numBlocks = 2;
   
   
-  static  final String LOCAL_FS_ROOT_URI = "file:///tmp/test";
-  
   
   
   protected static FileSystem fSys;
   protected static FileSystem fSys;
   
   
@@ -83,7 +82,7 @@ public abstract class FSMainOperationsBaseTest  {
     }     
     }     
   };
   };
   
   
-  private static byte[] data = getFileData(numBlocks,
+  protected static final byte[] data = getFileData(numBlocks,
       getDefaultBlockSize());
       getDefaultBlockSize());
   
   
   @Before
   @Before
@@ -183,7 +182,7 @@ public abstract class FSMainOperationsBaseTest  {
   
   
   @Test
   @Test
   public void testWDAbsolute() throws IOException {
   public void testWDAbsolute() throws IOException {
-    Path absoluteDir = new Path(LOCAL_FS_ROOT_URI + "/existingDir");
+    Path absoluteDir = new Path(fSys.getUri() + "/test/existingDir");
     fSys.mkdirs(absoluteDir);
     fSys.mkdirs(absoluteDir);
     fSys.setWorkingDirectory(absoluteDir);
     fSys.setWorkingDirectory(absoluteDir);
     Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
     Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
@@ -646,7 +645,7 @@ public abstract class FSMainOperationsBaseTest  {
     writeReadAndDelete(getDefaultBlockSize() * 2);
     writeReadAndDelete(getDefaultBlockSize() * 2);
   }
   }
   
   
-  private void writeReadAndDelete(int len) throws IOException {
+  protected void writeReadAndDelete(int len) throws IOException {
     Path path = getTestRootPath(fSys, "test/hadoop/file");
     Path path = getTestRootPath(fSys, "test/hadoop/file");
     
     
     fSys.mkdirs(path.getParent());
     fSys.mkdirs(path.getParent());
@@ -768,6 +767,7 @@ public abstract class FSMainOperationsBaseTest  {
       rename(src, dst, false, false, false, Rename.NONE);
       rename(src, dst, false, false, false, Rename.NONE);
       Assert.fail("Should throw FileNotFoundException");
       Assert.fail("Should throw FileNotFoundException");
     } catch (IOException e) {
     } catch (IOException e) {
+      Log.info("XXX", e);
       Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
       Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
     }
     }
 
 

+ 3 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java

@@ -45,7 +45,7 @@ import org.apache.hadoop.fs.Path;
 public abstract class FileSystemContractBaseTest extends TestCase {
 public abstract class FileSystemContractBaseTest extends TestCase {
   
   
   protected FileSystem fs;
   protected FileSystem fs;
-  private byte[] data = new byte[getBlockSize() * 2]; // two blocks of data
+  protected byte[] data = new byte[getBlockSize() * 2]; // two blocks of data
   {
   {
     for (int i = 0; i < data.length; i++) {
     for (int i = 0; i < data.length; i++) {
       data[i] = (byte) (i % 10);
       data[i] = (byte) (i % 10);
@@ -215,7 +215,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
     writeReadAndDelete(getBlockSize() * 2);
     writeReadAndDelete(getBlockSize() * 2);
   }
   }
   
   
-  private void writeReadAndDelete(int len) throws IOException {
+  protected void writeReadAndDelete(int len) throws IOException {
     Path path = path("/test/hadoop/file");
     Path path = path("/test/hadoop/file");
     
     
     fs.mkdirs(path.getParent());
     fs.mkdirs(path.getParent());
@@ -256,7 +256,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
     assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
     assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
     
     
     try {
     try {
-      fs.create(path, false);
+      fs.create(path, false).close();
       fail("Should throw IOException.");
       fail("Should throw IOException.");
     } catch (IOException e) {
     } catch (IOException e) {
       // Expected
       // Expected

+ 26 - 17
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java

@@ -17,16 +17,15 @@
  */
  */
 package org.apache.hadoop.fs;
 package org.apache.hadoop.fs;
 
 
-import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.IOException;
 import java.io.FileNotFoundException;
 import java.io.FileNotFoundException;
 import java.net.URI;
 import java.net.URI;
+import java.util.Random;
 
 
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
 import org.junit.Assert;
 import org.junit.Assert;
-
+import static org.junit.Assert.*;
 
 
 /**
 /**
  * Helper class for unit tests.
  * Helper class for unit tests.
@@ -143,23 +142,33 @@ public final class FileSystemTestHelper {
     }
     }
   }
   }
   
   
-  
-  public static void writeFile(FileSystem fSys, Path path,byte b[])
-    throws Exception {
-    FSDataOutputStream out = 
-      fSys.create(path);
-    out.write(b);
-    out.close();
+  static String writeFile(FileSystem fileSys, Path name, int fileSize)
+    throws IOException {
+    final long seed = 0xDEADBEEFL;
+    // Create and write a file that contains three blocks of data
+    FSDataOutputStream stm = fileSys.create(name);
+    byte[] buffer = new byte[fileSize];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    stm.close();
+    return new String(buffer);
   }
   }
   
   
-  public static byte[] readFile(FileSystem fSys, Path path, int len )
-    throws Exception {
-    DataInputStream dis = fSys.open(path);
-    byte[] buffer = new byte[len];
-    IOUtils.readFully(dis, buffer, 0, len);
-    dis.close();
-    return buffer;
+  static String readFile(FileSystem fs, Path name, int buflen) 
+    throws IOException {
+    byte[] b = new byte[buflen];
+    int offset = 0;
+    FSDataInputStream in = fs.open(name);
+    for (int remaining, n;
+        (remaining = b.length - offset) > 0 && (n = in.read(b, offset, remaining)) != -1;
+        offset += n); 
+    assertEquals(offset, Math.min(b.length, in.getPos()));
+    in.close();
+    String s = new String(b, 0, offset);
+    return s;
   }
   }
+
   public static FileStatus containsPath(FileSystem fSys, Path path,
   public static FileStatus containsPath(FileSystem fSys, Path path,
       FileStatus[] dirList)
       FileStatus[] dirList)
     throws IOException {
     throws IOException {

+ 19 - 20
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFileSystem.java

@@ -18,10 +18,9 @@
 
 
 package org.apache.hadoop.fs;
 package org.apache.hadoop.fs;
 
 
-import java.net.URI;
-
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import static org.apache.hadoop.fs.FileSystemTestHelper.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import junit.framework.TestCase;
 import junit.framework.TestCase;
 
 
@@ -56,13 +55,13 @@ public class TestChecksumFileSystem extends TestCase {
 
 
     // Exercise some boundary cases - a divisor of the chunk size
     // Exercise some boundary cases - a divisor of the chunk size
     // the chunk size, 2x chunk size, and +/-1 around these.
     // the chunk size, 2x chunk size, and +/-1 around these.
-    TestLocalFileSystem.readFile(localFs, testPath, 128);
-    TestLocalFileSystem.readFile(localFs, testPath, 511);
-    TestLocalFileSystem.readFile(localFs, testPath, 512);
-    TestLocalFileSystem.readFile(localFs, testPath, 513);
-    TestLocalFileSystem.readFile(localFs, testPath, 1023);
-    TestLocalFileSystem.readFile(localFs, testPath, 1024);
-    TestLocalFileSystem.readFile(localFs, testPath, 1025);
+    readFile(localFs, testPath, 128);
+    readFile(localFs, testPath, 511);
+    readFile(localFs, testPath, 512);
+    readFile(localFs, testPath, 513);
+    readFile(localFs, testPath, 1023);
+    readFile(localFs, testPath, 1024);
+    readFile(localFs, testPath, 1025);
 
 
     localFs.delete(localFs.getChecksumFile(testPath), true);
     localFs.delete(localFs.getChecksumFile(testPath), true);
     assertTrue("checksum deleted", !localFs.exists(localFs.getChecksumFile(testPath)));
     assertTrue("checksum deleted", !localFs.exists(localFs.getChecksumFile(testPath)));
@@ -74,7 +73,7 @@ public class TestChecksumFileSystem extends TestCase {
     
     
     boolean errorRead = false;
     boolean errorRead = false;
     try {
     try {
-      TestLocalFileSystem.readFile(localFs, testPath, 1024);
+      readFile(localFs, testPath, 1024);
     }catch(ChecksumException ie) {
     }catch(ChecksumException ie) {
       errorRead = true;
       errorRead = true;
     }
     }
@@ -83,7 +82,7 @@ public class TestChecksumFileSystem extends TestCase {
     //now setting verify false, the read should succeed
     //now setting verify false, the read should succeed
     try {
     try {
       localFs.setVerifyChecksum(false);
       localFs.setVerifyChecksum(false);
-      String str = TestLocalFileSystem.readFile(localFs, testPath, 1024);
+      String str = readFile(localFs, testPath, 1024).toString();
       assertTrue("read", "testing".equals(str));
       assertTrue("read", "testing".equals(str));
     } finally {
     } finally {
       // reset for other tests
       // reset for other tests
@@ -104,13 +103,13 @@ public class TestChecksumFileSystem extends TestCase {
 
 
     // Exercise some boundary cases - a divisor of the chunk size
     // Exercise some boundary cases - a divisor of the chunk size
     // the chunk size, 2x chunk size, and +/-1 around these.
     // the chunk size, 2x chunk size, and +/-1 around these.
-    TestLocalFileSystem.readFile(localFs, testPath, 128);
-    TestLocalFileSystem.readFile(localFs, testPath, 511);
-    TestLocalFileSystem.readFile(localFs, testPath, 512);
-    TestLocalFileSystem.readFile(localFs, testPath, 513);
-    TestLocalFileSystem.readFile(localFs, testPath, 1023);
-    TestLocalFileSystem.readFile(localFs, testPath, 1024);
-    TestLocalFileSystem.readFile(localFs, testPath, 1025);
+    readFile(localFs, testPath, 128);
+    readFile(localFs, testPath, 511);
+    readFile(localFs, testPath, 512);
+    readFile(localFs, testPath, 513);
+    readFile(localFs, testPath, 1023);
+    readFile(localFs, testPath, 1024);
+    readFile(localFs, testPath, 1025);
   }
   }
 
 
   /**
   /**
@@ -140,7 +139,7 @@ public class TestChecksumFileSystem extends TestCase {
 
 
     // Now reading the file should fail with a ChecksumException
     // Now reading the file should fail with a ChecksumException
     try {
     try {
-      TestLocalFileSystem.readFile(localFs, testPath, 1024);
+      readFile(localFs, testPath, 1024);
       fail("Did not throw a ChecksumException when reading truncated " +
       fail("Did not throw a ChecksumException when reading truncated " +
            "crc file");
            "crc file");
     } catch(ChecksumException ie) {
     } catch(ChecksumException ie) {
@@ -149,7 +148,7 @@ public class TestChecksumFileSystem extends TestCase {
     // telling it not to verify checksums, should avoid issue.
     // telling it not to verify checksums, should avoid issue.
     try {
     try {
       localFs.setVerifyChecksum(false);
       localFs.setVerifyChecksum(false);
-      String str = TestLocalFileSystem.readFile(localFs, testPath, 1024);
+      String str = readFile(localFs, testPath, 1024).toString();
       assertTrue("read", "testing truncation".equals(str));
       assertTrue("read", "testing truncation".equals(str));
     } finally {
     } finally {
       // reset for other tests
       // reset for other tests

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java

@@ -29,7 +29,7 @@ public class TestDU extends TestCase {
   final static private File DU_DIR = new File(
   final static private File DU_DIR = new File(
       System.getProperty("test.build.data","/tmp"), "dutmp");
       System.getProperty("test.build.data","/tmp"), "dutmp");
 
 
-  public void setUp() throws IOException {
+  public void setUp() {
       FileUtil.fullyDelete(DU_DIR);
       FileUtil.fullyDelete(DU_DIR);
       assertTrue(DU_DIR.mkdirs());
       assertTrue(DU_DIR.mkdirs());
   }
   }

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java

@@ -98,7 +98,7 @@ public class TestHardLink {
    * @throws IOException
    * @throws IOException
    */
    */
   @BeforeClass
   @BeforeClass
-  public static void setupClean() throws IOException {
+  public static void setupClean() {
     //delete source and target directories if they exist
     //delete source and target directories if they exist
     FileUtil.fullyDelete(src);
     FileUtil.fullyDelete(src);
     FileUtil.fullyDelete(tgt_one);
     FileUtil.fullyDelete(tgt_one);

+ 30 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs;
 package org.apache.hadoop.fs;
 
 
 import java.io.File;
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.IOException;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
@@ -208,4 +209,33 @@ public class TestLocalDirAllocator extends TestCase {
     }
     }
   }
   }
   
   
+  /** Two buffer dirs. The first dir does not exist & is on a read-only disk; 
+   * The second dir exists & is RW
+   * The three-argument getLocalPathForWrite should create the missing parent
+   * directory; the overload that disables the check should leave it uncreated.
+   * @throws Exception
+   */
+  public void testLocalPathForWriteDirCreation() throws IOException {
+    try {
+      conf.set(CONTEXT, BUFFER_DIR[0] + "," + BUFFER_DIR[1]);
+      assertTrue(localFs.mkdirs(BUFFER_PATH[1]));
+      BUFFER_ROOT.setReadOnly();
+      Path p1 =
+          dirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
+      assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());
+
+      Path p2 =
+          dirAllocator.getLocalPathForWrite("p2/x", SMALL_FILE_SIZE, conf,
+              false);
+      try {
+        localFs.getFileStatus(p2.getParent());
+      } catch (Exception e) {
+        assertEquals(e.getClass(), FileNotFoundException.class);
+      }
+    } finally {
+      Shell.execCommand(new String[] { "chmod", "u+w", BUFFER_DIR_ROOT });
+      rmBufferDirs();
+    }
+  }
+  
 }
 }

+ 54 - 34
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java

@@ -18,37 +18,23 @@
 package org.apache.hadoop.fs;
 package org.apache.hadoop.fs;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.fs.FileSystemTestHelper.*;
+
 import java.io.*;
 import java.io.*;
-import junit.framework.*;
+
+import static org.junit.Assert.*;
+import org.junit.Before;
+import org.junit.Test;
 
 
 /**
 /**
  * This class tests the local file system via the FileSystem abstraction.
  * This class tests the local file system via the FileSystem abstraction.
  */
  */
-public class TestLocalFileSystem extends TestCase {
+public class TestLocalFileSystem {
   private static String TEST_ROOT_DIR
   private static String TEST_ROOT_DIR
     = System.getProperty("test.build.data","build/test/data/work-dir/localfs");
     = System.getProperty("test.build.data","build/test/data/work-dir/localfs");
 
 
-
-  static void writeFile(FileSystem fs, Path name) throws IOException {
-    FSDataOutputStream stm = fs.create(name);
-    stm.writeBytes("42\n");
-    stm.close();
-  }
-  
-  static String readFile(FileSystem fs, Path name, int buflen) throws IOException {
-    byte[] b = new byte[buflen];
-    int offset = 0;
-    FSDataInputStream in = fs.open(name);
-    for(int remaining, n;
-        (remaining = b.length - offset) > 0 && (n = in.read(b, offset, remaining)) != -1;
-        offset += n); 
-    assertEquals(offset, Math.min(b.length, in.getPos()));
-    in.close();
-
-    String s = new String(b, 0, offset);
-    System.out.println("s=" + s);
-    return s;
-  }
+  private Configuration conf;
+  private FileSystem fileSys;
 
 
   private void cleanupFile(FileSystem fs, Path name) throws IOException {
   private void cleanupFile(FileSystem fs, Path name) throws IOException {
     assertTrue(fs.exists(name));
     assertTrue(fs.exists(name));
@@ -56,12 +42,18 @@ public class TestLocalFileSystem extends TestCase {
     assertTrue(!fs.exists(name));
     assertTrue(!fs.exists(name));
   }
   }
   
   
+  @Before
+  public void setup() throws IOException {
+    conf = new Configuration();
+    fileSys = FileSystem.getLocal(conf);
+    fileSys.delete(new Path(TEST_ROOT_DIR), true);
+  }
+
   /**
   /**
    * Test the capability of setting the working directory.
    * Test the capability of setting the working directory.
    */
    */
+  @Test
   public void testWorkingDirectory() throws IOException {
   public void testWorkingDirectory() throws IOException {
-    Configuration conf = new Configuration();
-    FileSystem fileSys = FileSystem.getLocal(conf);
     Path origDir = fileSys.getWorkingDirectory();
     Path origDir = fileSys.getWorkingDirectory();
     Path subdir = new Path(TEST_ROOT_DIR, "new");
     Path subdir = new Path(TEST_ROOT_DIR, "new");
     try {
     try {
@@ -85,7 +77,7 @@ public class TestLocalFileSystem extends TestCase {
       // create files and manipulate them.
       // create files and manipulate them.
       Path file1 = new Path("file1");
       Path file1 = new Path("file1");
       Path file2 = new Path("sub/file2");
       Path file2 = new Path("sub/file2");
-      writeFile(fileSys, file1);
+      String contents = writeFile(fileSys, file1, 1);
       fileSys.copyFromLocalFile(file1, file2);
       fileSys.copyFromLocalFile(file1, file2);
       assertTrue(fileSys.exists(file1));
       assertTrue(fileSys.exists(file1));
       assertTrue(fileSys.isFile(file1));
       assertTrue(fileSys.isFile(file1));
@@ -103,11 +95,10 @@ public class TestLocalFileSystem extends TestCase {
       InputStream stm = fileSys.open(file1);
       InputStream stm = fileSys.open(file1);
       byte[] buffer = new byte[3];
       byte[] buffer = new byte[3];
       int bytesRead = stm.read(buffer, 0, 3);
       int bytesRead = stm.read(buffer, 0, 3);
-      assertEquals("42\n", new String(buffer, 0, bytesRead));
+      assertEquals(contents, new String(buffer, 0, bytesRead));
       stm.close();
       stm.close();
     } finally {
     } finally {
       fileSys.setWorkingDirectory(origDir);
       fileSys.setWorkingDirectory(origDir);
-      fileSys.delete(subdir, true);
     }
     }
   }
   }
 
 
@@ -115,6 +106,7 @@ public class TestLocalFileSystem extends TestCase {
    * test Syncable interface on raw local file system
    * test Syncable interface on raw local file system
    * @throws IOException
    * @throws IOException
    */
    */
+  @Test
   public void testSyncable() throws IOException {
   public void testSyncable() throws IOException {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     FileSystem fs = FileSystem.getLocal(conf).getRawFileSystem();
     FileSystem fs = FileSystem.getLocal(conf).getRawFileSystem();
@@ -148,12 +140,13 @@ public class TestLocalFileSystem extends TestCase {
     }
     }
   }
   }
   
   
+  @Test
   public void testCopy() throws IOException {
   public void testCopy() throws IOException {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     LocalFileSystem fs = FileSystem.getLocal(conf);
     LocalFileSystem fs = FileSystem.getLocal(conf);
     Path src = new Path(TEST_ROOT_DIR, "dingo");
     Path src = new Path(TEST_ROOT_DIR, "dingo");
     Path dst = new Path(TEST_ROOT_DIR, "yak");
     Path dst = new Path(TEST_ROOT_DIR, "yak");
-    writeFile(fs, src);
+    writeFile(fs, src, 1);
     assertTrue(FileUtil.copy(fs, src, fs, dst, true, false, conf));
     assertTrue(FileUtil.copy(fs, src, fs, dst, true, false, conf));
     assertTrue(!fs.exists(src) && fs.exists(dst));
     assertTrue(!fs.exists(src) && fs.exists(dst));
     assertTrue(FileUtil.copy(fs, dst, fs, src, false, false, conf));
     assertTrue(FileUtil.copy(fs, dst, fs, src, false, false, conf));
@@ -170,9 +163,12 @@ public class TestLocalFileSystem extends TestCase {
     try {
     try {
       FileUtil.copy(fs, dst, fs, src, true, true, conf);
       FileUtil.copy(fs, dst, fs, src, true, true, conf);
       fail("Failed to detect existing dir");
       fail("Failed to detect existing dir");
-    } catch (IOException e) { }
+    } catch (IOException e) {
+      // Expected
+    }
   }
   }
 
 
+  @Test
   public void testHomeDirectory() throws IOException {
   public void testHomeDirectory() throws IOException {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     FileSystem fileSys = FileSystem.getLocal(conf);
     FileSystem fileSys = FileSystem.getLocal(conf);
@@ -182,16 +178,18 @@ public class TestLocalFileSystem extends TestCase {
     assertEquals(home, fsHome);
     assertEquals(home, fsHome);
   }
   }
 
 
+  @Test
   public void testPathEscapes() throws IOException {
   public void testPathEscapes() throws IOException {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     FileSystem fs = FileSystem.getLocal(conf);
     FileSystem fs = FileSystem.getLocal(conf);
     Path path = new Path(TEST_ROOT_DIR, "foo%bar");
     Path path = new Path(TEST_ROOT_DIR, "foo%bar");
-    writeFile(fs, path);
+    writeFile(fs, path, 1);
     FileStatus status = fs.getFileStatus(path);
     FileStatus status = fs.getFileStatus(path);
     assertEquals(path.makeQualified(fs), status.getPath());
     assertEquals(path.makeQualified(fs), status.getPath());
     cleanupFile(fs, path);
     cleanupFile(fs, path);
   }
   }
   
   
+  @Test
   public void testMkdirs() throws IOException {
   public void testMkdirs() throws IOException {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     LocalFileSystem fs = FileSystem.getLocal(conf);
     LocalFileSystem fs = FileSystem.getLocal(conf);
@@ -199,18 +197,40 @@ public class TestLocalFileSystem extends TestCase {
     Path test_file = new Path(TEST_ROOT_DIR, "file1");
     Path test_file = new Path(TEST_ROOT_DIR, "file1");
     assertTrue(fs.mkdirs(test_dir));
     assertTrue(fs.mkdirs(test_dir));
    
    
-    writeFile(fs, test_file);
+    writeFile(fs, test_file, 1);
     // creating dir over a file
     // creating dir over a file
     Path bad_dir = new Path(test_file, "another_dir");
     Path bad_dir = new Path(test_file, "another_dir");
     
     
     try {
     try {
       fs.mkdirs(bad_dir);
       fs.mkdirs(bad_dir);
       fail("Failed to detect existing file in path");
       fail("Failed to detect existing file in path");
-    } catch (FileAlreadyExistsException e) { }
+    } catch (FileAlreadyExistsException e) { 
+      // Expected
+    }
     
     
     try {
     try {
       fs.mkdirs(null);
       fs.mkdirs(null);
       fail("Failed to detect null in mkdir arg");
       fail("Failed to detect null in mkdir arg");
-    } catch (IllegalArgumentException e) { }
+    } catch (IllegalArgumentException e) {
+      // Expected
+    }
+  }
+
+  /** Test deleting a file, directory, and non-existent path */
+  @Test
+  public void testBasicDelete() throws IOException {
+    Configuration conf = new Configuration();
+    LocalFileSystem fs = FileSystem.getLocal(conf);
+    Path dir1 = new Path(TEST_ROOT_DIR, "dir1");
+    Path file1 = new Path(TEST_ROOT_DIR, "file1");
+    Path file2 = new Path(TEST_ROOT_DIR+"/dir1", "file2");
+    Path file3 = new Path(TEST_ROOT_DIR, "does-not-exist");
+    assertTrue(fs.mkdirs(dir1));
+    writeFile(fs, file1, 1);
+    writeFile(fs, file2, 1);
+    assertFalse("Returned true deleting non-existent path", 
+        fs.delete(file3));
+    assertTrue("Did not delete file", fs.delete(file1));
+    assertTrue("Did not delete non-empty dir", fs.delete(dir1));
   }
   }
 }
 }

+ 12 - 21
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java

@@ -19,9 +19,9 @@ package org.apache.hadoop.fs;
 
 
 
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
+import static org.apache.hadoop.fs.FileSystemTestHelper.*;
 
 
 import java.io.ByteArrayOutputStream;
 import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
 import java.io.File;
 import java.io.File;
 import java.io.IOException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.io.PrintStream;
@@ -42,14 +42,6 @@ public class TestTrash extends TestCase {
     new Path(new File(System.getProperty("test.build.data","/tmp")
     new Path(new File(System.getProperty("test.build.data","/tmp")
           ).toURI().toString().replace(' ', '+'), "testTrash");
           ).toURI().toString().replace(' ', '+'), "testTrash");
 
 
-  protected static Path writeFile(FileSystem fs, Path f) throws IOException {
-    DataOutputStream out = fs.create(f);
-    out.writeBytes("dhruba: " + f);
-    out.close();
-    assertTrue(fs.exists(f));
-    return f;
-  }
-
   protected static Path mkdir(FileSystem fs, Path p) throws IOException {
   protected static Path mkdir(FileSystem fs, Path p) throws IOException {
     assertTrue(fs.mkdirs(p));
     assertTrue(fs.mkdirs(p));
     assertTrue(fs.exists(p));
     assertTrue(fs.exists(p));
@@ -139,7 +131,7 @@ public class TestTrash extends TestCase {
 
 
     // Second, create a file in that directory.
     // Second, create a file in that directory.
     Path myFile = new Path(base, "test/mkdirs/myFile");
     Path myFile = new Path(base, "test/mkdirs/myFile");
-    writeFile(fs, myFile);
+    writeFile(fs, myFile, 10);
 
 
     // Verify that expunge without Trash directory
     // Verify that expunge without Trash directory
     // won't throw Exception
     // won't throw Exception
@@ -176,7 +168,7 @@ public class TestTrash extends TestCase {
     }
     }
 
 
     // Verify that we can recreate the file
     // Verify that we can recreate the file
-    writeFile(fs, myFile);
+    writeFile(fs, myFile, 10);
 
 
     // Verify that we succeed in removing the file we re-created
     // Verify that we succeed in removing the file we re-created
     {
     {
@@ -194,7 +186,7 @@ public class TestTrash extends TestCase {
     }
     }
 
 
     // Verify that we can recreate the file
     // Verify that we can recreate the file
-    writeFile(fs, myFile);
+    writeFile(fs, myFile, 10);
     
     
     // Verify that we succeed in removing the whole directory
     // Verify that we succeed in removing the whole directory
     // along with the file inside it.
     // along with the file inside it.
@@ -234,7 +226,7 @@ public class TestTrash extends TestCase {
     {
     {
         Path toErase = new Path(trashRoot, "toErase");
         Path toErase = new Path(trashRoot, "toErase");
         int retVal = -1;
         int retVal = -1;
-        writeFile(trashRootFs, toErase);
+        writeFile(trashRootFs, toErase, 10);
         try {
         try {
           retVal = shell.run(new String[] {"-rm", toErase.toString()});
           retVal = shell.run(new String[] {"-rm", toErase.toString()});
         } catch (Exception e) {
         } catch (Exception e) {
@@ -265,7 +257,7 @@ public class TestTrash extends TestCase {
 
 
     // recreate directory and file
     // recreate directory and file
     mkdir(fs, myPath);
     mkdir(fs, myPath);
-    writeFile(fs, myFile);
+    writeFile(fs, myFile, 10);
 
 
     // remove file first, then remove directory
     // remove file first, then remove directory
     {
     {
@@ -316,7 +308,7 @@ public class TestTrash extends TestCase {
     
     
     // recreate directory and file
     // recreate directory and file
     mkdir(fs, myPath);
     mkdir(fs, myPath);
-    writeFile(fs, myFile);
+    writeFile(fs, myFile, 10);
     
     
     // Verify that skip trash option really skips the trash for files (rm)
     // Verify that skip trash option really skips the trash for files (rm)
     {
     {
@@ -346,7 +338,7 @@ public class TestTrash extends TestCase {
     
     
     // recreate directory and file
     // recreate directory and file
     mkdir(fs, myPath);
     mkdir(fs, myPath);
-    writeFile(fs, myFile);
+    writeFile(fs, myFile, 10);
     
     
     // Verify that skip trash option really skips the trash for rmr
     // Verify that skip trash option really skips the trash for rmr
     {
     {
@@ -392,7 +384,7 @@ public class TestTrash extends TestCase {
       for(int i=0;i<num_runs; i++) {
       for(int i=0;i<num_runs; i++) {
         
         
         //create file
         //create file
-        writeFile(fs, myFile);
+        writeFile(fs, myFile, 10);
          
          
         // delete file
         // delete file
         try {
         try {
@@ -452,8 +444,7 @@ public class TestTrash extends TestCase {
         lfs.delete(p, true);
         lfs.delete(p, true);
       }
       }
       try {
       try {
-        f = writeFile(lfs, f);
-
+        writeFile(lfs, f, 10);
         FileSystem.closeAll();
         FileSystem.closeAll();
         FileSystem localFs = FileSystem.get(URI.create("file:///"), conf);
         FileSystem localFs = FileSystem.get(URI.create("file:///"), conf);
         Trash lTrash = new Trash(localFs, conf);
         Trash lTrash = new Trash(localFs, conf);
@@ -515,7 +506,7 @@ public class TestTrash extends TestCase {
     while (true)  {
     while (true)  {
       // Create a file with a new name
       // Create a file with a new name
       Path myFile = new Path(TEST_DIR, "test/mkdirs/myFile" + fileIndex++);
       Path myFile = new Path(TEST_DIR, "test/mkdirs/myFile" + fileIndex++);
-      writeFile(fs, myFile);
+      writeFile(fs, myFile, 10);
 
 
       // Delete the file to trash
       // Delete the file to trash
       String[] args = new String[2];
       String[] args = new String[2];
@@ -606,7 +597,7 @@ public class TestTrash extends TestCase {
     int iters = 1000;
     int iters = 1000;
     for(int i=0;i<iters; i++) {
     for(int i=0;i<iters; i++) {
       
       
-      writeFile(fs, myFile);
+      writeFile(fs, myFile, 10);
       
       
       start = System.currentTimeMillis();
       start = System.currentTimeMillis();
       
       

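The TestTrash hunks above drop the local two-argument writeFile(FileSystem, Path) helper and switch every call site to a three-argument form such as writeFile(fs, myFile, 10). The replacement helper itself is not shown in this excerpt; a minimal sketch of a three-argument variant, assuming it simply writes the requested number of bytes (the holder class name and the exact contents written are placeholders, not the committed code):

    // Hypothetical sketch only -- mirrors the writeFile(fs, path, 10) call sites above.
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class TrashTestHelper {                        // placeholder name, not in the patch
      static Path writeFile(FileSystem fs, Path f, int numBytes) throws IOException {
        DataOutputStream out = fs.create(f);       // create (or overwrite) the file
        out.write(new byte[numBytes]);             // write numBytes zero bytes
        out.close();
        return f;
      }
    }
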
+ 25 - 7
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -18,9 +18,7 @@
 package org.apache.hadoop.http;
 package org.apache.hadoop.http;
 
 
 import java.io.IOException;
 import java.io.IOException;
-import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.io.PrintWriter;
-import java.net.URLConnection;
 import java.net.HttpURLConnection;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.net.URL;
 import java.util.Arrays;
 import java.util.Arrays;
@@ -52,6 +50,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.http.HttpServer.QuotingInputFilter.RequestQuoter;
 import org.apache.hadoop.http.HttpServer.QuotingInputFilter.RequestQuoter;
+import org.apache.hadoop.http.resource.JerseyResource;
 import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.AccessControlList;
@@ -59,6 +58,7 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.Mockito;
+import org.mortbay.util.ajax.JSON;
 
 
 public class TestHttpServer extends HttpServerFunctionalTest {
 public class TestHttpServer extends HttpServerFunctionalTest {
   static final Log LOG = LogFactory.getLog(TestHttpServer.class);
   static final Log LOG = LogFactory.getLog(TestHttpServer.class);
@@ -75,7 +75,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
                       ) throws ServletException, IOException {
                       ) throws ServletException, IOException {
       PrintWriter out = response.getWriter();
       PrintWriter out = response.getWriter();
       Map<String, String[]> params = request.getParameterMap();
       Map<String, String[]> params = request.getParameterMap();
-      SortedSet<String> keys = new TreeSet(params.keySet());
+      SortedSet<String> keys = new TreeSet<String>(params.keySet());
       for(String key: keys) {
       for(String key: keys) {
         out.print(key);
         out.print(key);
         out.print(':');
         out.print(':');
@@ -101,7 +101,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
                       HttpServletResponse response
                       HttpServletResponse response
                       ) throws ServletException, IOException {
                       ) throws ServletException, IOException {
       PrintWriter out = response.getWriter();
       PrintWriter out = response.getWriter();
-      SortedSet<String> sortedKeys = new TreeSet();
+      SortedSet<String> sortedKeys = new TreeSet<String>();
       Enumeration<String> keys = request.getParameterNames();
       Enumeration<String> keys = request.getParameterNames();
       while(keys.hasMoreElements()) {
       while(keys.hasMoreElements()) {
         sortedKeys.add(keys.nextElement());
         sortedKeys.add(keys.nextElement());
@@ -118,7 +118,6 @@ public class TestHttpServer extends HttpServerFunctionalTest {
 
 
   @SuppressWarnings("serial")
   @SuppressWarnings("serial")
   public static class HtmlContentServlet extends HttpServlet {
   public static class HtmlContentServlet extends HttpServlet {
-    @SuppressWarnings("unchecked")
     @Override
     @Override
     public void doGet(HttpServletRequest request, 
     public void doGet(HttpServletRequest request, 
                       HttpServletResponse response
                       HttpServletResponse response
@@ -131,10 +130,14 @@ public class TestHttpServer extends HttpServerFunctionalTest {
   }
   }
 
 
   @BeforeClass public static void setup() throws Exception {
   @BeforeClass public static void setup() throws Exception {
-    server = createTestServer();
+    Configuration conf = new Configuration();
+    conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
+    server = createTestServer(conf);
     server.addServlet("echo", "/echo", EchoServlet.class);
     server.addServlet("echo", "/echo", EchoServlet.class);
     server.addServlet("echomap", "/echomap", EchoMapServlet.class);
     server.addServlet("echomap", "/echomap", EchoMapServlet.class);
     server.addServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class);
     server.addServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class);
+    server.addJerseyResourcePackage(
+        JerseyResource.class.getPackage().getName(), "/jersey/*");
     server.start();
     server.start();
     baseUrl = getServerURL(server);
     baseUrl = getServerURL(server);
     LOG.info("HTTP server started: "+ baseUrl);
     LOG.info("HTTP server started: "+ baseUrl);
@@ -161,7 +164,8 @@ public class TestHttpServer extends HttpServerFunctionalTest {
             assertEquals("a:b\nc:d\n",
             assertEquals("a:b\nc:d\n",
                          readOutput(new URL(baseUrl, "/echo?a=b&c=d")));
                          readOutput(new URL(baseUrl, "/echo?a=b&c=d")));
             int serverThreads = server.webServer.getThreadPool().getThreads();
             int serverThreads = server.webServer.getThreadPool().getThreads();
-            assertTrue(serverThreads <= MAX_THREADS);
+            assertTrue("More threads are started than expected, Server Threads count: "
+                    + serverThreads, serverThreads <= MAX_THREADS);
             System.out.println("Number of threads = " + serverThreads +
             System.out.println("Number of threads = " + serverThreads +
                 " which is less or equal than the max = " + MAX_THREADS);
                 " which is less or equal than the max = " + MAX_THREADS);
           } catch (Exception e) {
           } catch (Exception e) {
@@ -404,4 +408,18 @@ public class TestHttpServer extends HttpServerFunctionalTest {
         values, parameterValues));
         values, parameterValues));
   }
   }
 
 
+  @SuppressWarnings("unchecked")
+  private static Map<String, Object> parse(String jsonString) {
+    return (Map<String, Object>)JSON.parse(jsonString);
+  }
+
+  @Test public void testJersey() throws Exception {
+    LOG.info("BEGIN testJersey()");
+    final String js = readOutput(new URL(baseUrl, "/jersey/foo?op=bar"));
+    final Map<String, Object> m = parse(js);
+    LOG.info("m=" + m);
+    assertEquals("foo", m.get(JerseyResource.PATH));
+    assertEquals("bar", m.get(JerseyResource.OP));
+    LOG.info("END testJersey()");
+  }
 }
 }

+ 64 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java

@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.http.resource;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.mortbay.util.ajax.JSON;
+
+/**
+ * A simple Jersey resource class for TestHttpServer.
+ * The servlet simply puts the path and the op parameter in a map
+ * and returns it in JSON format in the response.
+ */
+@Path("")
+public class JerseyResource {
+  static final Log LOG = LogFactory.getLog(JerseyResource.class);
+
+  public static final String PATH = "path";
+  public static final String OP = "op";
+
+  @GET
+  @Path("{" + PATH + ":.*}")
+  @Produces({MediaType.APPLICATION_JSON})
+  public Response get(
+      @PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path,
+      @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op
+      ) throws IOException {
+    LOG.info("get: " + PATH + "=" + path + ", " + OP + "=" + op);
+
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put(PATH, path);
+    m.put(OP, op);
+    final String js = JSON.toString(m);
+    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+  }
+}
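
As exercised by testJersey() in TestHttpServer above, this resource is registered via addJerseyResourcePackage(JerseyResource.class.getPackage().getName(), "/jersey/*") and answers GET requests with a JSON map of the path and op values. A small client-side sketch of that round trip, assuming a server configured like the test's setup() is listening at the base URL passed on the command line:

    // Fetches /jersey/foo?op=bar and decodes the JSON map returned by JerseyResource,
    // e.g. {"op":"bar","path":"foo"}.
    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URL;
    import java.util.Map;

    import org.mortbay.util.ajax.JSON;

    public class JerseyResourceClientSketch {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) throws Exception {
        URL url = new URL(new URL(args[0]), "/jersey/foo?op=bar");
        BufferedReader in = new BufferedReader(new InputStreamReader(url.openStream()));
        StringBuilder body = new StringBuilder();
        for (String line; (line = in.readLine()) != null; ) {
          body.append(line);
        }
        in.close();
        Map<String, Object> m = (Map<String, Object>) JSON.parse(body.toString());
        System.out.println(m.get("path") + " / " + m.get("op"));   // foo / bar
      }
    }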

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java

@@ -50,7 +50,7 @@ public class TestNativeIO {
   }
   }
 
 
   @Before
   @Before
-  public void setupTestDir() throws IOException {
+  public void setupTestDir() {
     FileUtil.fullyDelete(TEST_DIR);
     FileUtil.fullyDelete(TEST_DIR);
     TEST_DIR.mkdirs();
     TEST_DIR.mkdirs();
   }
   }

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java

@@ -97,7 +97,7 @@ public class TestIPC {
     }
     }
 
 
     @Override
     @Override
-    public Writable call(Class<?> protocol, Writable param, long receiveTime)
+    public Writable call(String protocol, Writable param, long receiveTime)
         throws IOException {
         throws IOException {
       if (sleep) {
       if (sleep) {
         // sleep a bit
         // sleep a bit

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java

@@ -72,7 +72,7 @@ public class TestIPCServerResponder extends TestCase {
     }
     }
 
 
     @Override
     @Override
-    public Writable call(Class<?> protocol, Writable param, long receiveTime)
+    public Writable call(String protocol, Writable param, long receiveTime)
         throws IOException {
         throws IOException {
       if (sleep) {
       if (sleep) {
         try {
         try {

+ 255 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java

@@ -0,0 +1,255 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.junit.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+import org.junit.Before;
+import org.junit.After;
+import org.junit.Test;
+
+public class TestMultipleProtocolServer {
+  private static final String ADDRESS = "0.0.0.0";
+  private static InetSocketAddress addr;
+  private static RPC.Server server;
+
+  private static Configuration conf = new Configuration();
+  
+  
+  @ProtocolInfo(protocolName="Foo")
+  interface Foo0 extends VersionedProtocol {
+    public static final long versionID = 0L;
+    String ping() throws IOException;
+    
+  }
+  
+  @ProtocolInfo(protocolName="Foo")
+  interface Foo1 extends VersionedProtocol {
+    public static final long versionID = 1L;
+    String ping() throws IOException;
+    String ping2() throws IOException;
+  }
+  
+  @ProtocolInfo(protocolName="Foo")
+  interface FooUnimplemented extends VersionedProtocol {
+    public static final long versionID = 2L;
+    String ping() throws IOException;  
+  }
+  
+  interface Mixin extends VersionedProtocol{
+    public static final long versionID = 0L;
+    void hello() throws IOException;
+  }
+  interface Bar extends Mixin, VersionedProtocol {
+    public static final long versionID = 0L;
+    int echo(int i) throws IOException;
+  }
+  
+  
+  
+  class Foo0Impl implements Foo0 {
+
+    @Override
+    public long getProtocolVersion(String protocol, long clientVersion)
+        throws IOException {
+      return Foo0.versionID;
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public ProtocolSignature getProtocolSignature(String protocol,
+        long clientVersion, int clientMethodsHash) throws IOException {
+      Class<? extends VersionedProtocol> inter;
+      try {
+        inter = (Class<? extends VersionedProtocol>)getClass().
+                                          getGenericInterfaces()[0];
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+      return ProtocolSignature.getProtocolSignature(clientMethodsHash, 
+          getProtocolVersion(protocol, clientVersion), inter);
+    }
+
+    @Override
+    public String ping() {
+      return "Foo0";     
+    }
+    
+  }
+  
+  class Foo1Impl implements Foo1 {
+
+    @Override
+    public long getProtocolVersion(String protocol, long clientVersion)
+        throws IOException {
+      return Foo1.versionID;
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public ProtocolSignature getProtocolSignature(String protocol,
+        long clientVersion, int clientMethodsHash) throws IOException {
+      Class<? extends VersionedProtocol> inter;
+      try {
+        inter = (Class<? extends VersionedProtocol>)getClass().
+                                        getGenericInterfaces()[0];
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+      return ProtocolSignature.getProtocolSignature(clientMethodsHash, 
+          getProtocolVersion(protocol, clientVersion), inter);
+    }
+
+    @Override
+    public String ping() {
+      return "Foo1";
+    }
+
+    @Override
+    public String ping2() {
+      return "Foo1";
+      
+    }
+    
+  }
+
+  
+  class BarImpl implements Bar {
+
+    @Override
+    public long getProtocolVersion(String protocol, long clientVersion)
+        throws IOException {
+      return Bar.versionID;
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public ProtocolSignature getProtocolSignature(String protocol,
+        long clientVersion, int clientMethodsHash) throws IOException {
+      Class<? extends VersionedProtocol> inter;
+      try {
+        inter = (Class<? extends VersionedProtocol>)getClass().
+                                          getGenericInterfaces()[0];
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+      return ProtocolSignature.getProtocolSignature(clientMethodsHash, 
+          getProtocolVersion(protocol, clientVersion), inter);
+    }
+
+    @Override
+    public int echo(int i) {
+      return i;
+    }
+
+    @Override
+    public void hello() {
+
+      
+    }
+  }
+  @Before
+  public void setUp() throws Exception {
+    // create a server with two handlers
+    server = RPC.getServer(Foo0.class,
+                              new Foo0Impl(), ADDRESS, 0, 2, false, conf, null);
+    server.addProtocol(Foo1.class, new Foo1Impl());
+    server.addProtocol(Bar.class, new BarImpl());
+    server.addProtocol(Mixin.class, new BarImpl());
+    server.start();
+    addr = NetUtils.getConnectAddress(server);
+  }
+  
+  @After
+  public void tearDown() throws Exception {
+    server.stop();
+  }
+
+  @Test
+  public void test1() throws IOException {
+    ProtocolProxy<?> proxy;
+    proxy = RPC.getProtocolProxy(Foo0.class, Foo0.versionID, addr, conf);
+
+    Foo0 foo0 = (Foo0)proxy.getProxy(); 
+    Assert.assertEquals("Foo0", foo0.ping());
+    
+    
+    proxy = RPC.getProtocolProxy(Foo1.class, Foo1.versionID, addr, conf);
+    
+    
+    Foo1 foo1 = (Foo1)proxy.getProxy(); 
+    Assert.assertEquals("Foo1", foo1.ping());
+    Assert.assertEquals("Foo1", foo1.ping());
+    
+    
+    proxy = RPC.getProtocolProxy(Bar.class, Foo1.versionID, addr, conf);
+    
+    
+    Bar bar = (Bar)proxy.getProxy(); 
+    Assert.assertEquals(99, bar.echo(99));
+    
+    // Now test Mixin class method
+    
+    Mixin mixin = bar;
+    mixin.hello();
+  }
+  
+  
+  // Server does not implement the FooUnimplemented version of protocol Foo.
+  // See that calls to it fail.
+  @Test(expected=IOException.class)
+  public void testNonExistingProtocol() throws IOException {
+    ProtocolProxy<?> proxy;
+    proxy = RPC.getProtocolProxy(FooUnimplemented.class, 
+        FooUnimplemented.versionID, addr, conf);
+
+    FooUnimplemented foo = (FooUnimplemented)proxy.getProxy(); 
+    foo.ping();
+  }
+  
+  
+  /**
+   * getProtocolVersion of an unimplemented version should return the highest
+   * version the server implements; similarly, getProtocolSignature should work.
+   * @throws IOException
+   */
+  @Test
+  public void testNonExistingProtocol2() throws IOException {
+    ProtocolProxy<?> proxy;
+    proxy = RPC.getProtocolProxy(FooUnimplemented.class, 
+        FooUnimplemented.versionID, addr, conf);
+
+    FooUnimplemented foo = (FooUnimplemented)proxy.getProxy(); 
+    Assert.assertEquals(Foo1.versionID, 
+        foo.getProtocolVersion(RPC.getProtocolName(FooUnimplemented.class), 
+        FooUnimplemented.versionID));
+    foo.getProtocolSignature(RPC.getProtocolName(FooUnimplemented.class), 
+        FooUnimplemented.versionID, 0);
+  }
+  
+  @Test(expected=IOException.class)
+  public void testIncorrectServerCreation() throws IOException {
+    RPC.getServer(Foo1.class,
+        new Foo0Impl(), ADDRESS, 0, 2, false, conf, null);
+  }
+}
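
The new test above is the usage pattern introduced by HADOOP-7524: one RPC.Server is created for a primary protocol, further implementations are attached with addProtocol(), and @ProtocolInfo lets several interface versions share the protocol name "Foo". Condensed from setUp() and test1() above (the fragment reuses the test's own inner classes, so read it as if it sat inside TestMultipleProtocolServer):

    // One server, several protocols (condensed from the test above).
    RPC.Server server = RPC.getServer(Foo0.class, new Foo0Impl(),
        "0.0.0.0", 0, 2, false, conf, null);          // primary protocol "Foo", version 0
    server.addProtocol(Foo1.class, new Foo1Impl());   // same protocol name, version 1
    server.addProtocol(Bar.class, new BarImpl());     // a second, unrelated protocol
    server.start();

    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    // Clients ask for the specific interface/version they understand.
    Foo1 foo1 = (Foo1) RPC.getProtocolProxy(Foo1.class, Foo1.versionID, addr, conf)
        .getProxy();
    Bar bar = (Bar) RPC.getProtocolProxy(Bar.class, Bar.versionID, addr, conf)
        .getProxy();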

+ 31 - 8
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java

@@ -39,7 +39,7 @@ import org.junit.Test;
 public class TestRPCCompatibility {
 public class TestRPCCompatibility {
   private static final String ADDRESS = "0.0.0.0";
   private static final String ADDRESS = "0.0.0.0";
   private static InetSocketAddress addr;
   private static InetSocketAddress addr;
-  private static Server server;
+  private static RPC.Server server;
   private ProtocolProxy<?> proxy;
   private ProtocolProxy<?> proxy;
 
 
   public static final Log LOG =
   public static final Log LOG =
@@ -52,10 +52,12 @@ public class TestRPCCompatibility {
     void ping() throws IOException;    
     void ping() throws IOException;    
   }
   }
   
   
-  public interface TestProtocol1 extends TestProtocol0 {
+  public interface TestProtocol1 extends VersionedProtocol, TestProtocol0 {
     String echo(String value) throws IOException;
     String echo(String value) throws IOException;
   }
   }
 
 
+  @ProtocolInfo(protocolName=
+      "org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1")
   public interface TestProtocol2 extends TestProtocol1 {
   public interface TestProtocol2 extends TestProtocol1 {
     int echo(int value)  throws IOException;
     int echo(int value)  throws IOException;
   }
   }
@@ -89,11 +91,23 @@ public class TestRPCCompatibility {
   public static class TestImpl1 extends TestImpl0 implements TestProtocol1 {
   public static class TestImpl1 extends TestImpl0 implements TestProtocol1 {
     @Override
     @Override
     public String echo(String value) { return value; }
     public String echo(String value) { return value; }
+    @Override
+    public long getProtocolVersion(String protocol,
+        long clientVersion) throws IOException {
+        return TestProtocol1.versionID;
+    }
   }
   }
 
 
   public static class TestImpl2 extends TestImpl1 implements TestProtocol2 {
   public static class TestImpl2 extends TestImpl1 implements TestProtocol2 {
     @Override
     @Override
     public int echo(int value) { return value; }
     public int echo(int value) { return value; }
+
+    @Override
+    public long getProtocolVersion(String protocol,
+        long clientVersion) throws IOException {
+      return TestProtocol2.versionID;
+    }
+
   }
   }
   
   
   @After
   @After
@@ -109,8 +123,10 @@ public class TestRPCCompatibility {
   @Test  // old client vs new server
   @Test  // old client vs new server
   public void testVersion0ClientVersion1Server() throws Exception {
   public void testVersion0ClientVersion1Server() throws Exception {
     // create a server with two handlers
     // create a server with two handlers
+    TestImpl1 impl = new TestImpl1();
     server = RPC.getServer(TestProtocol1.class,
     server = RPC.getServer(TestProtocol1.class,
-                              new TestImpl1(), ADDRESS, 0, 2, false, conf, null);
+                            impl, ADDRESS, 0, 2, false, conf, null);
+    server.addProtocol(TestProtocol0.class, impl);
     server.start();
     server.start();
     addr = NetUtils.getConnectAddress(server);
     addr = NetUtils.getConnectAddress(server);
 
 
@@ -172,8 +188,10 @@ public class TestRPCCompatibility {
   @Test // Compatible new client & old server
   @Test // Compatible new client & old server
   public void testVersion2ClientVersion1Server() throws Exception {
   public void testVersion2ClientVersion1Server() throws Exception {
     // create a server with two handlers
     // create a server with two handlers
+    TestImpl1 impl = new TestImpl1();
     server = RPC.getServer(TestProtocol1.class,
     server = RPC.getServer(TestProtocol1.class,
-                              new TestImpl1(), ADDRESS, 0, 2, false, conf, null);
+                              impl, ADDRESS, 0, 2, false, conf, null);
+    server.addProtocol(TestProtocol0.class, impl);
     server.start();
     server.start();
     addr = NetUtils.getConnectAddress(server);
     addr = NetUtils.getConnectAddress(server);
 
 
@@ -190,8 +208,10 @@ public class TestRPCCompatibility {
   @Test // equal version client and server
   @Test // equal version client and server
   public void testVersion2ClientVersion2Server() throws Exception {
   public void testVersion2ClientVersion2Server() throws Exception {
     // create a server with two handlers
     // create a server with two handlers
+    TestImpl2 impl = new TestImpl2();
     server = RPC.getServer(TestProtocol2.class,
     server = RPC.getServer(TestProtocol2.class,
-                              new TestImpl2(), ADDRESS, 0, 2, false, conf, null);
+                             impl, ADDRESS, 0, 2, false, conf, null);
+    server.addProtocol(TestProtocol0.class, impl);
     server.start();
     server.start();
     addr = NetUtils.getConnectAddress(server);
     addr = NetUtils.getConnectAddress(server);
 
 
@@ -250,14 +270,16 @@ public class TestRPCCompatibility {
     assertEquals(hash1, hash2);
     assertEquals(hash1, hash2);
   }
   }
   
   
+  @ProtocolInfo(protocolName=
+      "org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1")
   public interface TestProtocol4 extends TestProtocol2 {
   public interface TestProtocol4 extends TestProtocol2 {
-    public static final long versionID = 1L;
+    public static final long versionID = 4L;
     int echo(int value)  throws IOException;
     int echo(int value)  throws IOException;
   }
   }
   
   
   @Test
   @Test
   public void testVersionMismatch() throws IOException {
   public void testVersionMismatch() throws IOException {
-    server = RPC.getServer(TestProtocol2.class, new TestImpl0(), ADDRESS, 0, 2,
+    server = RPC.getServer(TestProtocol2.class, new TestImpl2(), ADDRESS, 0, 2,
         false, conf, null);
         false, conf, null);
     server.start();
     server.start();
     addr = NetUtils.getConnectAddress(server);
     addr = NetUtils.getConnectAddress(server);
@@ -268,7 +290,8 @@ public class TestRPCCompatibility {
       proxy.echo(21);
       proxy.echo(21);
       fail("The call must throw VersionMismatch exception");
       fail("The call must throw VersionMismatch exception");
     } catch (IOException ex) {
     } catch (IOException ex) {
-      Assert.assertTrue(ex.getMessage().contains("VersionMismatch"));
+      Assert.assertTrue("Expected version mismatch but got " + ex.getMessage(), 
+          ex.getMessage().contains("VersionMismatch"));
     }
     }
   }
   }
 }
 }
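
The TestRPCCompatibility changes above rely on the same facility for cross-version compatibility: @ProtocolInfo pins a newer interface to the wire-level name of the older one, and the server registers the older interface explicitly with addProtocol() so older clients can still connect. In outline, taken from the hunks above with the interface body shortened:

    // Newer interface, same protocol name as TestProtocol1 on the wire.
    @ProtocolInfo(protocolName =
        "org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1")
    public interface TestProtocol2 extends TestProtocol1 {
      int echo(int value) throws IOException;   // added in the newer version
    }

    // Server side (as in testVersion2ClientVersion1Server): serve version 1
    // and register version 0 as well.
    TestImpl1 impl = new TestImpl1();
    server = RPC.getServer(TestProtocol1.class, impl, ADDRESS, 0, 2, false, conf, null);
    server.addProtocol(TestProtocol0.class, impl);
    server.start();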

+ 43 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java

@@ -26,12 +26,17 @@ import java.net.DatagramPacket;
 import java.net.DatagramSocket;
 import java.net.DatagramSocket;
 import java.net.SocketException;
 import java.net.SocketException;
 import java.util.ArrayList;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeUnit;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
@@ -54,6 +59,44 @@ public class TestGangliaMetrics {
       "test.s1rec.S1NumOps",
       "test.s1rec.S1NumOps",
       "test.s1rec.S1AvgTime" };
       "test.s1rec.S1AvgTime" };
 
 
+  @Test
+  public void testTagsForPrefix() throws Exception {
+    ConfigBuilder cb = new ConfigBuilder()
+      .add("test.sink.ganglia.tagsForPrefix.all", "*")
+      .add("test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, NumActiveSources")
+      .add("test.sink.ganglia.tagsForPrefix.none", "");
+    GangliaSink30 sink = new GangliaSink30();
+    sink.init(cb.subset("test.sink.ganglia"));
+
+    List<MetricsTag> tags = new ArrayList<MetricsTag>();
+    tags.add(new MetricsTag(MsInfo.Context, "all"));
+    tags.add(new MetricsTag(MsInfo.NumActiveSources, "foo"));
+    tags.add(new MetricsTag(MsInfo.NumActiveSinks, "bar"));
+    tags.add(new MetricsTag(MsInfo.NumAllSinks, "haa"));
+    tags.add(new MetricsTag(MsInfo.Hostname, "host"));
+    Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
+    MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 1, tags, metrics);
+
+    StringBuilder sb = new StringBuilder();
+    sink.appendPrefix(record, sb);
+    assertEquals(".NumActiveSources=foo.NumActiveSinks=bar.NumAllSinks=haa", sb.toString());
+
+    tags.set(0, new MetricsTag(MsInfo.Context, "some"));
+    sb = new StringBuilder();
+    sink.appendPrefix(record, sb);
+    assertEquals(".NumActiveSources=foo.NumActiveSinks=bar", sb.toString());
+
+    tags.set(0, new MetricsTag(MsInfo.Context, "none"));
+    sb = new StringBuilder();
+    sink.appendPrefix(record, sb);
+    assertEquals("", sb.toString());
+
+    tags.set(0, new MetricsTag(MsInfo.Context, "nada"));
+    sb = new StringBuilder();
+    sink.appendPrefix(record, sb);
+    assertEquals("", sb.toString());
+  }
+  
   @Test public void testGangliaMetrics2() throws Exception {
   @Test public void testGangliaMetrics2() throws Exception {
     ConfigBuilder cb = new ConfigBuilder().add("default.period", 10)
     ConfigBuilder cb = new ConfigBuilder().add("default.period", 10)
         .add("test.sink.gsink30.context", "test") // filter out only "test"
         .add("test.sink.gsink30.context", "test") // filter out only "test"

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java

@@ -18,7 +18,7 @@ package org.apache.hadoop.security;
 
 
 
 
 import junit.framework.TestCase;
 import junit.framework.TestCase;
-import org.apache.hadoop.alfredo.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterContainer;
 import org.mockito.Mockito;
 import org.mockito.Mockito;

+ 1 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java

@@ -49,8 +49,7 @@ public class TestRunJar extends TestCase {
   }
   }
 
 
   @After
   @After
-  protected void tearDown()
-      throws Exception {
+  protected void tearDown() {
     FileUtil.fullyDelete(TEST_ROOT_DIR);
     FileUtil.fullyDelete(TEST_ROOT_DIR);
   }
   }
 
 

+ 1 - 0
hadoop-common-project/pom.xml

@@ -29,6 +29,7 @@
 
 
   <modules>
   <modules>
     <module>hadoop-auth</module>
     <module>hadoop-auth</module>
+    <module>hadoop-auth-examples</module>
     <module>hadoop-common</module>
     <module>hadoop-common</module>
     <module>hadoop-annotations</module>
     <module>hadoop-annotations</module>
   </modules>
   </modules>

+ 34 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -5,9 +5,31 @@ Trunk (unreleased changes)
     HDFS-395.  DFS Scalability: Incremental block reports. (Tomasz Nykiel
     HDFS-395.  DFS Scalability: Incremental block reports. (Tomasz Nykiel
                via hairong)
                via hairong)
 
 
+  IMPROVEMENTS
+
+    HADOOP-7524 Change RPC to allow multiple protocols including multiple versions of the same protocol (sanjay Radia)
+
+    HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
+               HdfsConstants. (Harsh J Chouraria via atm)
+    HDFS-2197. Refactor RPC call implementations out of NameNode class (todd)
+
+    HDFS-2018. Move all journal stream management code into one place.
+               (Ivan Kelly via jitendra)
+
+    HDFS-2223. Untangle dependencies between NN components (todd)
+
   BUG FIXES
   BUG FIXES
     HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)
     HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)
 
 
+    HDFS-2299. TestOfflineEditsViewer is failing on trunk. (Uma Maheswara Rao G
+               via atm)
+    HDFS-2310. TestBackupNode fails since HADOOP-7524 went in.
+               (Ivan Kelly via todd)
+
+    HDFS-2313. Rat excludes has a typo for excluding editsStored files. (atm)
+
+    HDFS-2314. MRV1 test compilation broken after HDFS-2197 (todd)
+
 Release 0.23.0 - Unreleased
 Release 0.23.0 - Unreleased
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
@@ -687,6 +709,9 @@ Release 0.23.0 - Unreleased
     HDFS-2266.  Add Namesystem and SafeMode interfaces to avoid directly
     HDFS-2266.  Add Namesystem and SafeMode interfaces to avoid directly
     referring to FSNamesystem in BlockManager. (szetszwo)
     referring to FSNamesystem in BlockManager. (szetszwo)
 
 
+    HDFS-1217.  Change some NameNode methods from public to package private.
+    (Laxman via szetszwo)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -1003,6 +1028,9 @@ Release 0.23.0 - Unreleased
     HDFS-2286. DataXceiverServer logs AsynchronousCloseException at shutdown
     HDFS-2286. DataXceiverServer logs AsynchronousCloseException at shutdown
     (todd)
     (todd)
 
 
+    HDFS-2289. Ensure jsvc is bundled with the HDFS distribution artifact.
+    (Alejandro Abdelnur via acmurthy) 
+
   BREAKDOWN OF HDFS-1073 SUBTASKS
   BREAKDOWN OF HDFS-1073 SUBTASKS
 
 
     HDFS-1521. Persist transaction ID on disk between NN restarts.
     HDFS-1521. Persist transaction ID on disk between NN restarts.
@@ -1086,6 +1114,7 @@ Release 0.22.0 - Unreleased
     (jghoman)
     (jghoman)
 
 
     HDFS-1330. Make RPCs to DataNodes timeout. (hairong)
     HDFS-1330. Make RPCs to DataNodes timeout. (hairong)
+    Added additional unit tests per HADOOP-6889. (John George via mattf)
 
 
     HDFS-202.  HDFS support of listLocatedStatus introduced in HADOOP-6870.
     HDFS-202.  HDFS support of listLocatedStatus introduced in HADOOP-6870.
     HDFS piggyback block locations to each file status when listing a
     HDFS piggyback block locations to each file status when listing a
@@ -1541,6 +1570,11 @@ Release 0.22.0 - Unreleased
     HDFS-1981. NameNode does not saveNamespace() when editsNew is empty.
     HDFS-1981. NameNode does not saveNamespace() when editsNew is empty.
     (Uma Maheswara Rao G via shv)
     (Uma Maheswara Rao G via shv)
 
 
+    HDFS-2258. Reset lease limits to default values in TestLeaseRecovery2. (shv)
+
+    HDFS-2232. Generalize regular expressions in TestHDFSCLI.
+    (Plamen Jeliazkov via shv)
+
 Release 0.21.1 - Unreleased
 Release 0.21.1 - Unreleased
     HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
     HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
 
 

+ 51 - 1
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -296,7 +296,7 @@
             <exclude>src/test/all-tests</exclude>
             <exclude>src/test/all-tests</exclude>
             <exclude>src/test/resources/*.tgz</exclude>
             <exclude>src/test/resources/*.tgz</exclude>
             <exclude>src/test/resources/data*</exclude>
             <exclude>src/test/resources/data*</exclude>
-            <exclude>src/test/resources/editStored*</exclude>
+            <exclude>src/test/resources/editsStored*</exclude>
             <exclude>src/test/resources/empty-file</exclude>
             <exclude>src/test/resources/empty-file</exclude>
             <exclude>src/main/webapps/datanode/robots.txt</exclude>
             <exclude>src/main/webapps/datanode/robots.txt</exclude>
             <exclude>src/main/docs/releasenotes.html</exclude>
             <exclude>src/main/docs/releasenotes.html</exclude>
@@ -304,6 +304,56 @@
           </excludes>
           </excludes>
         </configuration>
         </configuration>
       </plugin>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>xprepare-package-hadoop-daemon</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <condition property="commons.daemon.os.name" value="darwin">
+                    <os name="Mac OS X"/>
+                </condition>
+                <condition property="commons.daemon.os.arch" value="universal">
+                    <os name="Mac OS X"/>
+                </condition>
+                <condition property="commons.daemon.os.name" value="linux">
+                    <os name="Linux" />
+                </condition>
+                <!-- Set commons.daemon.os.arch to either i686 or x86_64 for GNU/Linux -->
+                <condition property="commons.daemon.os.arch" value="x86_64">
+                    <os name="Linux" arch="amd64"/>
+                </condition>
+                <condition property="commons.daemon.os.arch" value="i686">
+                    <os name="Linux" /> <!-- This is a guess -->
+                </condition>
+                <property name="commons.daemon.tar.name"
+                          value="commons-daemon-${commons-daemon.version}-bin-${commons.daemon.os.name}-${commons.daemon.os.arch}.tar.gz"/>
+               
+                <property name="commons.daemon.download.dir"
+                          value="${project.build.directory}/downloads/commons-daemon"/>
+                <delete dir="${commons.daemon.download.dir}"/>
+                <mkdir dir="${commons.daemon.download.dir}"/>
+                <get src="http://archive.apache.org/dist/commons/daemon/binaries/${commons-daemon.version}/${commons.daemon.os.name}/${commons.daemon.tar.name}"
+                    dest="${commons.daemon.download.dir}/${commons.daemon.tar.name}" verbose="true" skipexisting="true"/>
+                <untar compression="gzip" src="${commons.daemon.download.dir}/${commons.daemon.tar.name}"
+                       dest="${commons.daemon.download.dir}"/>
+                <copy file="${commons.daemon.download.dir}/jsvc"
+                      todir="${project.build.directory}/${project.artifactId}-${project.version}/libexec"
+                      verbose="true"/>
+                <chmod perm="ugo+x" type="file">
+                  <fileset file="${project.build.directory}/${project.artifactId}-${project.version}/libexec/jsvc"/>
+                </chmod>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
     </plugins>
   </build>
   </build>
 
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs

@@ -118,7 +118,7 @@ if [ "$starting_secure_dn" = "true" ]; then
    HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
    HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
   fi
   fi
 
 
-  exec "$HADOOP_HDFS_HOME/bin/jsvc" \
+  exec "$HADOOP_HDFS_HOME/libexec/jsvc" \
            -Dproc_$COMMAND -outfile "$HADOOP_LOG_DIR/jsvc.out" \
            -Dproc_$COMMAND -outfile "$HADOOP_LOG_DIR/jsvc.out" \
            -errfile "$HADOOP_LOG_DIR/jsvc.err" \
            -errfile "$HADOOP_LOG_DIR/jsvc.err" \
            -pidfile "$HADOOP_SECURE_DN_PID" \
            -pidfile "$HADOOP_SECURE_DN_PID" \

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml

@@ -505,7 +505,7 @@
       using <code>'bin/hadoop dfsadmin -safemode'</code> command. NameNode front
       using <code>'bin/hadoop dfsadmin -safemode'</code> command. NameNode front
       page shows whether Safemode is on or off. A more detailed
       page shows whether Safemode is on or off. A more detailed
       description and configuration is maintained as JavaDoc for
       description and configuration is maintained as JavaDoc for
-      <a href="http://hadoop.apache.org/core/docs/current/api/org/apache/hadoop/dfs/NameNode.html#setSafeMode(org.apache.hadoop.dfs.FSConstants.SafeModeAction)"><code>setSafeMode()</code></a>.
+      <a href="http://hadoop.apache.org/core/docs/current/api/org/apache/hadoop/dfs/NameNode.html#setSafeMode(org.apache.hadoop.dfs.HdfsConstants.SafeModeAction)"><code>setSafeMode()</code></a>.
     </p>
     </p>
     
     
    </section> <section> <title> fsck </title>
    </section> <section> <title> fsck </title>
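
For the safe-mode status the user guide describes above, the programmatic counterpart of 'bin/hadoop dfsadmin -safemode get' is DistributedFileSystem.setSafeMode() with the HdfsConstants enum this merge renames FSConstants to. A minimal sketch (the NameNode URI is an assumption for illustration):

    // Queries whether the NameNode is currently in safe mode.
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

    public class SafeModeCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(URI.create("hdfs://localhost:8020"), conf);  // assumed URI
        boolean on = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);      // get-only query
        System.out.println("Safe mode is " + (on ? "ON" : "OFF"));
        dfs.close();
      }
    }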

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java

@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -70,9 +70,9 @@ public class Hdfs extends AbstractFileSystem {
    * @throws IOException
    * @throws IOException
    */
    */
   Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
   Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
-    super(theUri, FSConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);
+    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);
 
 
-    if (!theUri.getScheme().equalsIgnoreCase(FSConstants.HDFS_URI_SCHEME)) {
+    if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
       throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
       throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
     }
     }
     String host = theUri.getHost();
     String host = theUri.getHost();

+ 16 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -60,10 +60,10 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -77,7 +77,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -156,14 +156,14 @@ public class DFSClient implements java.io.Closeable {
           DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
           DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
           DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
           DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
       confTime = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
       confTime = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
-          HdfsConstants.WRITE_TIMEOUT);
+          HdfsServerConstants.WRITE_TIMEOUT);
       ioBufferSize = conf.getInt(
       ioBufferSize = conf.getInt(
           CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
           CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
           CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
           CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
       bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
       bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
           DFS_BYTES_PER_CHECKSUM_DEFAULT);
           DFS_BYTES_PER_CHECKSUM_DEFAULT);
       socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
       socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
-          HdfsConstants.READ_TIMEOUT);
+          HdfsServerConstants.READ_TIMEOUT);
       /** dfs.write.packet.size is an internal config variable */
       /** dfs.write.packet.size is an internal config variable */
       writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
       writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
           DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
           DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
@@ -279,12 +279,12 @@ public class DFSClient implements java.io.Closeable {
    */
    */
   int getDatanodeWriteTimeout(int numNodes) {
   int getDatanodeWriteTimeout(int numNodes) {
     return (dfsClientConf.confTime > 0) ?
     return (dfsClientConf.confTime > 0) ?
-      (dfsClientConf.confTime + HdfsConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0;
+      (dfsClientConf.confTime + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0;
   }
   }
 
 
   int getDatanodeReadTimeout(int numNodes) {
   int getDatanodeReadTimeout(int numNodes) {
     return dfsClientConf.socketTimeout > 0 ?
     return dfsClientConf.socketTimeout > 0 ?
-        (HdfsConstants.READ_TIMEOUT_EXTENSION * numNodes +
+        (HdfsServerConstants.READ_TIMEOUT_EXTENSION * numNodes +
             dfsClientConf.socketTimeout) : 0;
             dfsClientConf.socketTimeout) : 0;
   }
   }
   
   
@@ -1046,7 +1046,7 @@ public class DFSClient implements java.io.Closeable {
 
 
           out = new DataOutputStream(
           out = new DataOutputStream(
               new BufferedOutputStream(NetUtils.getOutputStream(sock), 
               new BufferedOutputStream(NetUtils.getOutputStream(sock), 
-                                       FSConstants.SMALL_BUFFER_SIZE));
+                                       HdfsConstants.SMALL_BUFFER_SIZE));
           in = new DataInputStream(NetUtils.getInputStream(sock));
           in = new DataInputStream(NetUtils.getInputStream(sock));
 
 
           if (LOG.isDebugEnabled()) {
           if (LOG.isDebugEnabled()) {
@@ -1225,7 +1225,7 @@ public class DFSClient implements java.io.Closeable {
   /**
   /**
    * Enter, leave or get safe mode.
    * Enter, leave or get safe mode.
    * 
    * 
-   * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
+   * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction)
    */
    */
   public boolean setSafeMode(SafeModeAction action) throws IOException {
   public boolean setSafeMode(SafeModeAction action) throws IOException {
     return namenode.setSafeMode(action);
     return namenode.setSafeMode(action);
@@ -1293,7 +1293,7 @@ public class DFSClient implements java.io.Closeable {
   }
   }
 
 
   /**
   /**
-   * @see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction)
+   * @see ClientProtocol#distributedUpgradeProgress(HdfsConstants.UpgradeAction)
    */
    */
   public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
   public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
       throws IOException {
       throws IOException {
@@ -1392,10 +1392,10 @@ public class DFSClient implements java.io.Closeable {
   void setQuota(String src, long namespaceQuota, long diskspaceQuota) 
   void setQuota(String src, long namespaceQuota, long diskspaceQuota) 
       throws IOException {
       throws IOException {
     // sanity check
     // sanity check
-    if ((namespaceQuota <= 0 && namespaceQuota != FSConstants.QUOTA_DONT_SET &&
-         namespaceQuota != FSConstants.QUOTA_RESET) ||
-        (diskspaceQuota <= 0 && diskspaceQuota != FSConstants.QUOTA_DONT_SET &&
-         diskspaceQuota != FSConstants.QUOTA_RESET)) {
+    if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
+         namespaceQuota != HdfsConstants.QUOTA_RESET) ||
+        (diskspaceQuota <= 0 && diskspaceQuota != HdfsConstants.QUOTA_DONT_SET &&
+         diskspaceQuota != HdfsConstants.QUOTA_RESET)) {
       throw new IllegalArgumentException("Invalid values for quota : " +
       throw new IllegalArgumentException("Invalid values for quota : " +
                                          namespaceQuota + " and " + 
                                          namespaceQuota + " and " + 
                                          diskspaceQuota);
                                          diskspaceQuota);

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -166,7 +166,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
       this.seqno = HEART_BEAT_SEQNO;
       this.seqno = HEART_BEAT_SEQNO;
       
       
       buffer = null;
       buffer = null;
-      int packetSize = PacketHeader.PKT_HEADER_LEN + FSConstants.BYTES_IN_INTEGER;
+      int packetSize = PacketHeader.PKT_HEADER_LEN + HdfsConstants.BYTES_IN_INTEGER;
       buf = new byte[packetSize];
       buf = new byte[packetSize];
       
       
       checksumStart = dataStart = packetSize;
       checksumStart = dataStart = packetSize;
@@ -234,12 +234,12 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
                          dataStart - checksumLen , checksumLen); 
                          dataStart - checksumLen , checksumLen); 
       }
       }
       
       
-      int pktLen = FSConstants.BYTES_IN_INTEGER + dataLen + checksumLen;
+      int pktLen = HdfsConstants.BYTES_IN_INTEGER + dataLen + checksumLen;
       
       
       //normally dataStart == checksumPos, i.e., offset is zero.
       //normally dataStart == checksumPos, i.e., offset is zero.
       buffer = ByteBuffer.wrap(
       buffer = ByteBuffer.wrap(
         buf, dataStart - checksumPos,
         buf, dataStart - checksumPos,
-        PacketHeader.PKT_HEADER_LEN + pktLen - FSConstants.BYTES_IN_INTEGER);
+        PacketHeader.PKT_HEADER_LEN + pktLen - HdfsConstants.BYTES_IN_INTEGER);
       buf = null;
       buf = null;
       buffer.mark();
       buffer.mark();
 
 
@@ -849,7 +849,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
         final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2);
         final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2);
         out = new DataOutputStream(new BufferedOutputStream(
         out = new DataOutputStream(new BufferedOutputStream(
             NetUtils.getOutputStream(sock, writeTimeout),
             NetUtils.getOutputStream(sock, writeTimeout),
-            FSConstants.SMALL_BUFFER_SIZE));
+            HdfsConstants.SMALL_BUFFER_SIZE));
 
 
         //send the TRANSFER_BLOCK request
         //send the TRANSFER_BLOCK request
         new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
         new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
@@ -1023,7 +1023,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
         //
         //
         out = new DataOutputStream(new BufferedOutputStream(
         out = new DataOutputStream(new BufferedOutputStream(
             NetUtils.getOutputStream(s, writeTimeout),
             NetUtils.getOutputStream(s, writeTimeout),
-            FSConstants.SMALL_BUFFER_SIZE));
+            HdfsConstants.SMALL_BUFFER_SIZE));
         
         
         assert null == blockReplyStream : "Previous blockReplyStream unclosed";
         assert null == blockReplyStream : "Previous blockReplyStream unclosed";
         blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));
         blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));
@@ -1173,7 +1173,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
     final int timeout = client.getDatanodeReadTimeout(length);
     final int timeout = client.getDatanodeReadTimeout(length);
     NetUtils.connect(sock, isa, timeout);
     NetUtils.connect(sock, isa, timeout);
     sock.setSoTimeout(timeout);
     sock.setSoTimeout(timeout);
-    sock.setSendBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE);
+    sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
     if(DFSClient.LOG.isDebugEnabled()) {
     if(DFSClient.LOG.isDebugEnabled()) {
       DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
       DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
     }
     }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -646,7 +646,7 @@ public class DFSUtil {
   static ClientProtocol createNamenode(ClientProtocol rpcNamenode)
     throws IOException {
     RetryPolicy createPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
-        5, FSConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
+        5, HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
     
     Map<Class<? extends Exception>,RetryPolicy> remoteExceptionToPolicyMap =
       new HashMap<Class<? extends Exception>, RetryPolicy>();

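The createNamenode() hunk above builds a retry policy for NameNode RPCs; below is a rough, hedged sketch of the same construction. The sleep value is a stand-in for HdfsConstants.LEASE_SOFTLIMIT_PERIOD, and the exception map is left empty because the mapping itself is not shown in this diff.

    // Sketch of the policy built in DFSUtil.createNamenode(): retry up to 5
    // times with a fixed sleep between attempts.
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    public class CreateRetrySketch {
      static final long LEASE_SOFTLIMIT_PERIOD_MS = 60 * 1000L;  // assumed stand-in value

      public static void main(String[] args) {
        RetryPolicy createPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            5, LEASE_SOFTLIMIT_PERIOD_MS, TimeUnit.MILLISECONDS);

        // Exceptions mapped here get the create policy; everything else falls
        // through to whatever default policy the caller chooses.
        Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap =
            new HashMap<Class<? extends Exception>, RetryPolicy>();
        System.out.println(createPolicy + ", mapped exceptions: "
            + remoteExceptionToPolicyMap.size());
      }
    }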
+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -49,9 +49,9 @@ import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -108,7 +108,7 @@ public class DistributedFileSystem extends FileSystem {
 
     InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority());
     this.dfs = new DFSClient(namenode, conf, statistics);
-    this.uri = URI.create(FSConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority());
+    this.uri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority());
     this.workingDir = getHomeDirectory();
   }
 
@@ -642,9 +642,9 @@ public class DistributedFileSystem extends FileSystem {
    * Enter, leave or get safe mode.
    *  
    * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
-   *    FSConstants.SafeModeAction)
+   *    HdfsConstants.SafeModeAction)
    */
-  public boolean setSafeMode(FSConstants.SafeModeAction action) 
+  public boolean setSafeMode(HdfsConstants.SafeModeAction action) 
   throws IOException {
     return dfs.setSafeMode(action);
   }

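A hedged usage sketch for the renamed setSafeMode() entry point shown above: it queries (rather than changes) safe mode. The Configuration is assumed to point fs.defaultFS at an HDFS cluster; that setup is caller-side and not part of this diff.

    // Ask the NameNode whether it is currently in safe mode.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    public class SafeModeCheck {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());  // assumes an HDFS default filesystem
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          boolean inSafeMode = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
          System.out.println("NameNode in safe mode: " + inSafeMode);
        }
        fs.close();
      }
    }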
+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java

@@ -30,7 +30,7 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StringUtils;
@@ -162,7 +162,7 @@ class LeaseRenewer {
   /** The time in milliseconds that the map became empty. */
   private long emptyTime = Long.MAX_VALUE;
   /** A fixed lease renewal time period in milliseconds */
-  private long renewal = FSConstants.LEASE_SOFTLIMIT_PERIOD/2;
+  private long renewal = HdfsConstants.LEASE_SOFTLIMIT_PERIOD/2;
 
   /** A daemon for renewing lease */
   private Daemon daemon = null;
@@ -352,7 +352,7 @@ class LeaseRenewer {
 
     //update renewal time
     if (renewal == dfsc.getHdfsTimeout()/2) {
-      long min = FSConstants.LEASE_SOFTLIMIT_PERIOD;
+      long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
       for(DFSClient c : dfsclients) {
        if (c.getHdfsTimeout() > 0) {
          final long timeout = c.getHdfsTimeout();

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java

@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatus
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
@@ -394,7 +394,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
                                      throws IOException {
     // in and out will be closed when sock is closed (by the caller)
     final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
-          NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT)));
+          NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT)));
     new Sender(out).readBlock(block, blockToken, clientName, startOffset, len);
     
     //
@@ -486,7 +486,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
   void sendReadResult(Socket sock, Status statusCode) {
     assert !sentStatusCode : "already sent status code to " + sock;
     try {
-      OutputStream out = NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT);
+      OutputStream out = NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT);
       
       ClientReadStatusProto.newBuilder()
         .setStatus(statusCode)

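Both RemoteBlockReader hunks above wrap the socket's output stream with a write timeout before handing it to the data-transfer protocol. The sketch below shows that pattern on its own; the timeout value and the datanode address are illustrative stand-ins, and only the NetUtils.getOutputStream(socket, timeout) call mirrors the code shown.

    // Writes on the wrapped stream fail if they cannot complete within the timeout.
    import java.io.BufferedOutputStream;
    import java.io.DataOutputStream;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    import org.apache.hadoop.net.NetUtils;

    public class TimedWriteSketch {
      static final long WRITE_TIMEOUT_MS = 8 * 60 * 1000L;  // assumed stand-in for HdfsServerConstants.WRITE_TIMEOUT

      public static void main(String[] args) throws Exception {
        Socket sock = new Socket();
        sock.connect(new InetSocketAddress("datanode.example.com", 50010), 60 * 1000);
        DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
            NetUtils.getOutputStream(sock, WRITE_TIMEOUT_MS)));
        // ... a data-transfer request would be issued on 'out' here ...
        out.flush();
        sock.close();
      }
    }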
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java

@@ -22,7 +22,7 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 
 /**

+ 10 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -35,7 +35,7 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
@@ -578,7 +578,7 @@ public interface ClientProtocol extends VersionedProtocol {
   * Return live datanodes if type is LIVE; dead datanodes if type is DEAD;
   * otherwise all datanodes if type is ALL.
   */
-  public DatanodeInfo[] getDatanodeReport(FSConstants.DatanodeReportType type)
+  public DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
      throws IOException;
 
   /**
@@ -601,7 +601,7 @@ public interface ClientProtocol extends VersionedProtocol {
   * <p>
   * Safe mode is entered automatically at name node startup.
   * Safe mode can also be entered manually using
-   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}.
+   * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}.
   * <p>
   * At startup the name node accepts data node reports collecting
   * information about block locations.
@@ -617,11 +617,11 @@ public interface ClientProtocol extends VersionedProtocol {
   * Then the name node leaves safe mode.
   * <p>
   * If safe mode is turned on manually using
-   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
+   * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
   * then the name node stays in safe mode until it is manually turned off
-   * using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
+   * using {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
   * Current state of the name node can be verified using
-   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
+   * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
   * <h4>Configuration parameters:</h4>
   * <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
   * <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
@@ -644,7 +644,7 @@ public interface ClientProtocol extends VersionedProtocol {
   *                   
   * @throws IOException
   */
-  public boolean setSafeMode(FSConstants.SafeModeAction action) 
+  public boolean setSafeMode(HdfsConstants.SafeModeAction action) 
      throws IOException;
 
   /**
@@ -685,7 +685,7 @@ public interface ClientProtocol extends VersionedProtocol {
   /**
   * Report distributed upgrade progress or force current upgrade to proceed.
   * 
-   * @param action {@link FSConstants.UpgradeAction} to perform
+   * @param action {@link HdfsConstants.UpgradeAction} to perform
   * @return upgrade status information or null if no upgrades are in progress
   * @throws IOException
   */
@@ -777,8 +777,8 @@ public interface ClientProtocol extends VersionedProtocol {
   * <br><br>
   *                       
   * The quota can have three types of values : (1) 0 or more will set 
-   * the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET}  implies 
-   * the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET} 
+   * the quota to that value, (2) {@link HdfsConstants#QUOTA_DONT_SET}  implies 
+   * the quota will not be changed, and (3) {@link HdfsConstants#QUOTA_RESET} 
   * implies the quota will be reset. Any other value is a runtime error.
   * 
   * @throws AccessControlException permission denied

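The quota Javadoc above distinguishes three kinds of values. The sketch below is an illustrative reading of that rule only; the sentinel values and the helper method are assumptions for the example, not the HDFS implementation.

    // Interpret a quota argument according to the three cases in the Javadoc.
    public class QuotaArgSketch {
      static final long QUOTA_DONT_SET = Long.MAX_VALUE;  // assumed sentinel
      static final long QUOTA_RESET = -1L;                // assumed sentinel

      static String interpret(long quota) {
        if (quota == QUOTA_DONT_SET) {
          return "leave the quota unchanged";
        } else if (quota == QUOTA_RESET) {
          return "reset (clear) the quota";
        } else if (quota >= 0) {
          return "set the quota to " + quota;
        }
        // Any other value is the "runtime error" case from the Javadoc.
        throw new IllegalArgumentException("invalid quota value: " + quota);
      }

      public static void main(String[] args) {
        System.out.println(interpret(10));              // set the quota to 10
        System.out.println(interpret(QUOTA_DONT_SET));  // leave the quota unchanged
        System.out.println(interpret(QUOTA_RESET));     // reset (clear) the quota
      }
    }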
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSConstants.java → hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java

@@ -26,9 +26,9 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
  * 
  ************************************/
 @InterfaceAudience.Private
-public final class FSConstants {
+public final class HdfsConstants {
   /* Hidden constructor */
-  private FSConstants() {
+  private HdfsConstants() {
   }
 
   public static int MIN_BLOCKS_FOR_WRITE = 5;

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -55,15 +55,15 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
@@ -306,13 +306,13 @@ public class Balancer {
       DataInputStream in = null;
       try {
         sock.connect(NetUtils.createSocketAddr(
-            target.datanode.getName()), HdfsConstants.READ_TIMEOUT);
+            target.datanode.getName()), HdfsServerConstants.READ_TIMEOUT);
         sock.setKeepAlive(true);
         out = new DataOutputStream( new BufferedOutputStream(
-            sock.getOutputStream(), FSConstants.IO_FILE_BUFFER_SIZE));
+            sock.getOutputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE));
         sendRequest(out);
         in = new DataInputStream( new BufferedInputStream(
-            sock.getInputStream(), FSConstants.IO_FILE_BUFFER_SIZE));
+            sock.getInputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE));
         receiveResponse(in);
         bytesMoved.inc(block.getNumBytes());
         LOG.info( "Moving block " + block.getBlock().getBlockId() +

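A hedged sketch of the Balancer's per-move socket setup shown above, using only JDK types. The timeout, buffer size, and address are stand-ins for HdfsServerConstants.READ_TIMEOUT, HdfsConstants.IO_FILE_BUFFER_SIZE, and the target datanode; sendRequest()/receiveResponse() are elided.

    // Connect with a timeout, keep the connection alive, and buffer both
    // directions of the request/response exchange.
    import java.io.BufferedInputStream;
    import java.io.BufferedOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    public class BalancerSocketSketch {
      static final int READ_TIMEOUT_MS = 60 * 1000;  // assumed stand-in
      static final int IO_FILE_BUFFER_SIZE = 4096;   // assumed stand-in

      public static void main(String[] args) throws Exception {
        Socket sock = new Socket();
        sock.connect(new InetSocketAddress("datanode.example.com", 50010), READ_TIMEOUT_MS);
        sock.setKeepAlive(true);
        DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
            sock.getOutputStream(), IO_FILE_BUFFER_SIZE));
        DataInputStream in = new DataInputStream(new BufferedInputStream(
            sock.getInputStream(), IO_FILE_BUFFER_SIZE));
        // The real Balancer calls sendRequest(out) and receiveResponse(in) here.
        out.flush();
        in.close();
        out.close();
        sock.close();
      }
    }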
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java

@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.util.LightWeightGSet;
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java

@@ -22,8 +22,8 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
 /**

Some files were not shown because too many files changed in this diff