
Merge trunk into auto-HA branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3042@1333291 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon, 13 years ago
parent
commit
f6c01e2f8e
100 changed files with 2170 additions and 909 deletions
  1. BUILDING.txt (+4, -0)
  2. dev-support/test-patch.sh (+107, -35)
  3. hadoop-client/pom.xml (+0, -16)
  4. hadoop-common-project/dev-support/test-patch.properties (+0, -21)
  5. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java (+1, -1)
  6. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java (+1, -1)
  7. hadoop-common-project/hadoop-common/CHANGES.txt (+66, -0)
  8. hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml (+5, -0)
  9. hadoop-common-project/hadoop-common/src/main/bin/hadoop (+8, -6)
  10. hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/commands_manual.xml (+6, -3)
  11. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java (+132, -32)
  12. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java (+9, -6)
  13. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java (+10, -4)
  14. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java (+58, -10)
  15. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java (+9, -10)
  16. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java (+12, -1)
  17. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java (+12, -1)
  18. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java (+11, -0)
  19. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KosmosFileSystem.java (+11, -0)
  20. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java (+11, -0)
  21. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java (+12, -1)
  22. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java (+11, -0)
  23. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FenceMethod.java (+1, -1)
  24. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java (+2, -1)
  25. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java (+60, -77)
  26. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java (+2, -2)
  27. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java (+34, -2)
  28. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java (+23, -15)
  29. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java (+36, -33)
  30. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java (+5, -6)
  31. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInfoServerSideTranslatorPB.java (+3, -4)
  32. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java (+15, -4)
  33. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java (+1, -2)
  34. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcPayloadHeader.java (+0, -118)
  35. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java (+29, -25)
  36. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java (+7, -8)
  37. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java (+3, -5)
  38. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java (+13, -2)
  39. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java (+8, -0)
  40. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java (+30, -3)
  41. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/DelegationKey.java (+6, -1)
  42. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java (+28, -0)
  43. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java (+10, -2)
  44. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java (+0, -58)
  45. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java (+181, -0)
  46. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java (+14, -6)
  47. hadoop-common-project/hadoop-common/src/main/packages/templates/conf/mapred-site.xml (+0, -5)
  48. hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto (+58, -0)
  49. hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem (+7, -6)
  50. hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.io.compress.CompressionCodec (+20, -0)
  51. hadoop-common-project/hadoop-common/src/main/resources/core-default.xml (+5, -73)
  52. hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm (+0, -4)
  53. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java (+22, -0)
  54. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java (+2, -2)
  55. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java (+50, -0)
  56. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java (+176, -0)
  57. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java (+3, -3)
  58. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java (+7, -7)
  59. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java (+4, -1)
  60. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java (+13, -0)
  61. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java (+96, -0)
  62. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java (+1, -1)
  63. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableUtils.java (+25, -0)
  64. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java (+33, -10)
  65. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java (+1, -2)
  66. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java (+1, -2)
  67. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java (+4, -5)
  68. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java (+1, -2)
  69. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java (+9, -10)
  70. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java (+13, -0)
  71. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java (+62, -0)
  72. hadoop-common-project/hadoop-common/src/test/resources/test-patch.properties (+0, -18)
  73. hadoop-hdfs-project/dev-support/test-patch.properties (+0, -21)
  74. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java (+2, -2)
  75. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java (+3, -5)
  76. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java (+1, -1)
  77. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java (+1, -1)
  78. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java (+15, -5)
  79. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java (+1, -1)
  80. hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (+73, -2)
  81. hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/README.txt (+13, -7)
  82. hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml (+46, -0)
  83. hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs (+1, -6)
  84. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java (+7, -4)
  85. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java (+71, -31)
  86. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (+17, -45)
  87. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (+40, -5)
  88. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (+41, -23)
  89. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (+1, -33)
  90. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (+32, -18)
  91. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java (+31, -11)
  92. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java (+18, -2)
  93. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java (+71, -0)
  94. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java (+59, -0)
  95. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java (+1, -2)
  96. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (+1, -2)
  97. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java (+1, -2)
  98. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java (+1, -2)
  99. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java (+1, -2)
  100. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java (+1, -2)

+ 4 - 0
BUILDING.txt

@@ -87,4 +87,8 @@ Create source and binary distributions with native code and documentation:
 
   $ mvn package -Pdist,native,docs,src -DskipTests -Dtar
 
+Create a local staging version of the website (in /tmp/hadoop-site)
+
+  $ mvn clean site; mvn site:stage -DstagingDirectory=/tmp/hadoop-site
+
 ----------------------------------------------------------------------------------

+ 107 - 35
dev-support/test-patch.sh

@@ -39,6 +39,7 @@ WGET=${WGET:-wget}
 SVN=${SVN:-svn}
 GREP=${GREP:-grep}
 PATCH=${PATCH:-patch}
+DIFF=${DIFF:-diff}
 JIRACLI=${JIRA:-jira}
 FINDBUGS_HOME=${FINDBUGS_HOME}
 FORREST_HOME=${FORREST_HOME}
@@ -61,6 +62,7 @@ printUsage() {
   echo "--svn-cmd=<cmd>        The 'svn' command to use (default 'svn')"
   echo "--grep-cmd=<cmd>       The 'grep' command to use (default 'grep')"
   echo "--patch-cmd=<cmd>      The 'patch' command to use (default 'patch')"
+  echo "--diff-cmd=<cmd>       The 'diff' command to use (default 'diff')"
   echo "--findbugs-home=<path> Findbugs home directory (default FINDBUGS_HOME environment variable)"
   echo "--forrest-home=<path>  Forrest home directory (default FORREST_HOME environment variable)"
   echo "--dirty-workspace      Allow the local SVN workspace to have uncommitted changes"
@@ -113,6 +115,9 @@ parseArgs() {
     --patch-cmd=*)
       PATCH=${i#*=}
       ;;
+    --diff-cmd=*)
+      DIFF=${i#*=}
+      ;;
     --jira-cmd=*)
       JIRACLI=${i#*=}
       ;;
@@ -235,15 +240,6 @@ setup () {
       cleanupAndExit 0
     fi
   fi
-  . $BASEDIR/dev-support/test-patch.properties
-  ### exit if warnings are NOT defined in the properties file
-  if [ -z "$OK_FINDBUGS_WARNINGS" ] || [[ -z "$OK_JAVADOC_WARNINGS" ]] || [[ -z $OK_RELEASEAUDIT_WARNINGS ]]; then
-    echo "Please define the following properties in test-patch.properties file"
-	 echo  "OK_FINDBUGS_WARNINGS"
-	 echo  "OK_RELEASEAUDIT_WARNINGS"
-	 echo  "OK_JAVADOC_WARNINGS"
-    cleanupAndExit 1
-  fi
   echo ""
   echo ""
   echo "======================================================================"
@@ -384,10 +380,10 @@ checkJavadocWarnings () {
   echo ""
   echo "$MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1"
   if [ -d hadoop-project ]; then
-    (cd hadoop-project; $MVN install)
+    (cd hadoop-project; $MVN install > /dev/null 2>&1)
   fi
   if [ -d hadoop-common-project/hadoop-annotations ]; then  
-    (cd hadoop-common-project/hadoop-annotations; $MVN install)
+    (cd hadoop-common-project/hadoop-annotations; $MVN install > /dev/null 2>&1)
   fi
   $MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1
   javadocWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavadocWarnings.txt | $AWK '/Javadoc Warnings/,EOF' | $GREP warning | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
@@ -395,8 +391,10 @@ checkJavadocWarnings () {
   echo ""
   echo "There appear to be $javadocWarnings javadoc warnings generated by the patched build."
 
+  #There are 6 warnings that are caused by things that are caused by using sun internal APIs.
+  OK_JAVADOC_WARNINGS=6;
   ### if current warnings greater than OK_JAVADOC_WARNINGS
-  if [[ $javadocWarnings -gt $OK_JAVADOC_WARNINGS ]] ; then
+  if [[ $javadocWarnings -ne $OK_JAVADOC_WARNINGS ]] ; then
     JIRA_COMMENT="$JIRA_COMMENT
 
     -1 javadoc.  The javadoc tool appears to have generated `expr $(($javadocWarnings-$OK_JAVADOC_WARNINGS))` warning messages."
@@ -430,14 +428,21 @@ checkJavacWarnings () {
   fi
   ### Compare trunk and patch javac warning numbers
   if [[ -f $PATCH_DIR/patchJavacWarnings.txt ]] ; then
-    trunkJavacWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/trunkJavacWarnings.txt | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
-    patchJavacWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavacWarnings.txt | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
+    $GREP '\[WARNING\]' $PATCH_DIR/trunkJavacWarnings.txt > $PATCH_DIR/filteredTrunkJavacWarnings.txt
+    $GREP '\[WARNING\]' $PATCH_DIR/patchJavacWarnings.txt > $PATCH_DIR/filteredPatchJavacWarnings.txt
+    trunkJavacWarnings=`cat $PATCH_DIR/filteredTrunkJavacWarnings.txt | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
+    patchJavacWarnings=`cat $PATCH_DIR/filteredPatchJavacWarnings.txt | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
     echo "There appear to be $trunkJavacWarnings javac compiler warnings before the patch and $patchJavacWarnings javac compiler warnings after applying the patch."
     if [[ $patchJavacWarnings != "" && $trunkJavacWarnings != "" ]] ; then
       if [[ $patchJavacWarnings -gt $trunkJavacWarnings ]] ; then
         JIRA_COMMENT="$JIRA_COMMENT
 
     -1 javac.  The applied patch generated $patchJavacWarnings javac compiler warnings (more than the trunk's current $trunkJavacWarnings warnings)."
+
+    $DIFF $PATCH_DIR/filteredTrunkJavacWarnings.txt $PATCH_DIR/filteredPatchJavacWarnings.txt > $PATCH_DIR/diffJavacWarnings.txt 
+        JIRA_COMMENT_FOOTER="Javac warnings: $BUILD_URL/artifact/trunk/$(basename $BASEDIR)/patchprocess/diffJavacWarnings.txt
+$JIRA_COMMENT_FOOTER"
+
         return 1
       fi
     fi
@@ -460,8 +465,8 @@ checkReleaseAuditWarnings () {
   echo "======================================================================"
   echo ""
   echo ""
-  echo "$MVN apache-rat:check -D${PROJECT_NAME}PatchProcess 2>&1"
-  $MVN apache-rat:check -D${PROJECT_NAME}PatchProcess 2>&1
+  echo "$MVN apache-rat:check -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchReleaseAuditOutput.txt 2>&1"
+  $MVN apache-rat:check -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchReleaseAuditOutput.txt 2>&1
   find $BASEDIR -name rat.txt | xargs cat > $PATCH_DIR/patchReleaseAuditWarnings.txt
 
   ### Compare trunk and patch release audit warning numbers
@@ -469,12 +474,12 @@ checkReleaseAuditWarnings () {
     patchReleaseAuditWarnings=`$GREP -c '\!?????' $PATCH_DIR/patchReleaseAuditWarnings.txt`
     echo ""
     echo ""
-    echo "There appear to be $OK_RELEASEAUDIT_WARNINGS release audit warnings before the patch and $patchReleaseAuditWarnings release audit warnings after applying the patch."
-    if [[ $patchReleaseAuditWarnings != "" && $OK_RELEASEAUDIT_WARNINGS != "" ]] ; then
-      if [[ $patchReleaseAuditWarnings -gt $OK_RELEASEAUDIT_WARNINGS ]] ; then
+    echo "There appear to be $patchReleaseAuditWarnings release audit warnings after applying the patch."
+    if [[ $patchReleaseAuditWarnings != "" ]] ; then
+      if [[ $patchReleaseAuditWarnings -gt 0 ]] ; then
         JIRA_COMMENT="$JIRA_COMMENT
 
-    -1 release audit.  The applied patch generated $patchReleaseAuditWarnings release audit warnings (more than the trunk's current $OK_RELEASEAUDIT_WARNINGS warnings)."
+    -1 release audit.  The applied patch generated $patchReleaseAuditWarnings release audit warnings."
         $GREP '\!?????' $PATCH_DIR/patchReleaseAuditWarnings.txt > $PATCH_DIR/patchReleaseAuditProblems.txt
         echo "Lines that start with ????? in the release audit report indicate files that do not have an Apache license header." >> $PATCH_DIR/patchReleaseAuditProblems.txt
         JIRA_COMMENT_FOOTER="Release audit warnings: $BUILD_URL/artifact/trunk/patchprocess/patchReleaseAuditProblems.txt
@@ -536,10 +541,21 @@ checkFindbugsWarnings () {
   echo "======================================================================"
   echo ""
   echo ""
-  echo "$MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess" 
-  $MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess < /dev/null
+  
+  modules=$(findModules)
+  rc=0
+  for module in $modules;
+  do
+    cd $module
+    echo "  Running findbugs in $module"
+    module_suffix=`basename ${module}`
+    echo "$MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess < /dev/null > $PATCH_DIR/patchFindBugsOutput${module_suffix}.txt 2>&1" 
+    $MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess < /dev/null > $PATCH_DIR/patchFindBugsOutput${module_suffix}.txt 2>&1
+    (( rc = rc + $? ))
+    cd -
+  done
 
-  if [ $? != 0 ] ; then
+  if [ $rc != 0 ] ; then
     JIRA_COMMENT="$JIRA_COMMENT
 
     -1 findbugs.  The patch appears to cause Findbugs (version ${findbugs_version}) to fail."
@@ -572,11 +588,10 @@ $JIRA_COMMENT_FOOTER"
     fi
   done
 
-  ### if current warnings greater than OK_FINDBUGS_WARNINGS
-  if [[ $findbugsWarnings -gt $OK_FINDBUGS_WARNINGS ]] ; then
+  if [[ $findbugsWarnings -gt 0 ]] ; then
     JIRA_COMMENT="$JIRA_COMMENT
 
-    -1 findbugs.  The patch appears to introduce `expr $(($findbugsWarnings-$OK_FINDBUGS_WARNINGS))` new Findbugs (version ${findbugs_version}) warnings."
+    -1 findbugs.  The patch appears to introduce $findbugsWarnings new Findbugs (version ${findbugs_version}) warnings."
     return 1
   fi
   JIRA_COMMENT="$JIRA_COMMENT
@@ -598,8 +613,8 @@ checkEclipseGeneration () {
   echo ""
   echo ""
 
-  echo "$MVN eclipse:eclipse -D${PROJECT_NAME}PatchProcess"
-  $MVN eclipse:eclipse -D${PROJECT_NAME}PatchProcess
+  echo "$MVN eclipse:eclipse -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchEclipseOutput.txt 2>&1"
+  $MVN eclipse:eclipse -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchEclipseOutput.txt 2>&1
   if [[ $? != 0 ]] ; then
       JIRA_COMMENT="$JIRA_COMMENT
 
@@ -627,16 +642,28 @@ runTests () {
   echo ""
   echo ""
 
-  echo "$MVN clean install -fn -Pnative -D${PROJECT_NAME}PatchProcess"
-  $MVN clean install -fn -Pnative -D${PROJECT_NAME}PatchProcess
-  failed_tests=`find . -name 'TEST*.xml' | xargs $GREP  -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-|                  |g" | sed -e "s|\.xml||g"`
-  # With -fn mvn always exits with a 0 exit code.  Because of this we need to
-  # find the errors instead of using the exit code.  We assume that if the build
-  # failed a -1 is already given for that case
+  failed_tests=""
+  modules=$(findModules)
+  for module in $modules;
+  do
+    cd $module
+    echo "  Running tests in $module"
+    echo "  $MVN clean install -fn -Pnative -D${PROJECT_NAME}PatchProcess"
+    $MVN clean install -fn -Pnative -D${PROJECT_NAME}PatchProcess
+    module_failed_tests=`find . -name 'TEST*.xml' | xargs $GREP  -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-|                  |g" | sed -e "s|\.xml||g"`
+    # With -fn mvn always exits with a 0 exit code.  Because of this we need to
+    # find the errors instead of using the exit code.  We assume that if the build
+    # failed a -1 is already given for that case
+    if [[ -n "$module_failed_tests" ]] ; then
+      failed_tests="${failed_tests}
+${module_failed_tests}"
+    fi
+    cd -
+  done
   if [[ -n "$failed_tests" ]] ; then
     JIRA_COMMENT="$JIRA_COMMENT
 
-    -1 core tests.  The patch failed these unit tests:
+    -1 core tests.  The patch failed these unit tests in $modules:
 $failed_tests"
     return 1
   fi
@@ -646,6 +673,51 @@ $failed_tests"
   return 0
 }
 
+###############################################################################
+# Find the maven module containing the given file.
+findModule (){
+ dir=`dirname $1`
+ while [ 1 ]
+ do
+  if [ -f "$dir/pom.xml" ]
+  then
+    echo $dir
+    return
+  else
+    dir=`dirname $dir`
+  fi
+ done
+}
+
+findModules () {
+  # Come up with a list of changed files into $TMP
+  TMP=/tmp/tmp.paths.$$
+  $GREP '^+++\|^---' $PATCH_DIR/patch | cut -c '5-' | $GREP -v /dev/null | sort | uniq > $TMP
+  
+  # if all of the lines start with a/ or b/, then this is a git patch that
+  # was generated without --no-prefix
+  if ! $GREP -qv '^a/\|^b/' $TMP ; then
+    sed -i -e 's,^[ab]/,,' $TMP
+  fi
+  
+  # Now find all the modules that were changed
+  TMP_MODULES=/tmp/tmp.modules.$$
+  for file in $(cut -f 1 $TMP | sort | uniq); do
+    echo $(findModule $file) >> $TMP_MODULES
+  done
+  rm $TMP
+  
+  # Filter out modules without code 
+  CHANGED_MODULES=""
+  for module in $(cat $TMP_MODULES | sort | uniq); do
+    $GREP "<packaging>pom</packaging>" $module/pom.xml > /dev/null
+    if [ "$?" != 0 ]; then
+      CHANGED_MODULES="$CHANGED_MODULES $module"
+    fi
+  done
+  rm $TMP_MODULES
+  echo $CHANGED_MODULES
+}
 ###############################################################################
 ### Run the test-contrib target
 runContribTests () {

+ 0 - 16
hadoop-client/pom.xml

@@ -171,10 +171,6 @@
           <groupId>junit</groupId>
           <artifactId>junit</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>com.cenqua.clover</groupId>
-          <artifactId>clover</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>org.apache.avro</groupId>
           <artifactId>avro</artifactId>
@@ -211,10 +207,6 @@
           <groupId>com.sun.jersey.contribs</groupId>
           <artifactId>jersey-guice</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>com.cenqua.clover</groupId>
-          <artifactId>clover</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>com.google.inject.extensions</groupId>
           <artifactId>guice-servlet</artifactId>
@@ -263,10 +255,6 @@
           <groupId>com.google.inject.extensions</groupId>
           <artifactId>guice-servlet</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>com.cenqua.clover</groupId>
-          <artifactId>clover</artifactId>
-        </exclusion>
       </exclusions>
     </dependency>
 
@@ -291,10 +279,6 @@
           <groupId>com.google.inject.extensions</groupId>
           <artifactId>guice-servlet</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>com.cenqua.clover</groupId>
-          <artifactId>clover</artifactId>
-        </exclusion>
       </exclusions>
     </dependency>
   </dependencies>

+ 0 - 21
hadoop-common-project/dev-support/test-patch.properties

@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The number of acceptable warning for this module
-# Please update the root test-patch.properties if you update this file.
-
-OK_RELEASEAUDIT_WARNINGS=0
-OK_FINDBUGS_WARNINGS=0
-OK_JAVADOC_WARNINGS=13

+ 1 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java

@@ -288,7 +288,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
                 String clientPrincipal = gssContext.getSrcName().toString();
                 KerberosName kerberosName = new KerberosName(clientPrincipal);
                 String userName = kerberosName.getShortName();
-                token = new AuthenticationToken(userName, clientPrincipal, TYPE);
+                token = new AuthenticationToken(userName, clientPrincipal, getType());
                 response.setStatus(HttpServletResponse.SC_OK);
                 LOG.trace("SPNEGO completed for principal [{}]", clientPrincipal);
               }
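
Both this hunk and the PseudoAuthenticationHandler hunk below implement HADOOP-8309: the token is now built from getType() rather than the TYPE constant, so a handler that reports a different authentication type is honored. A minimal sketch of what that enables (the subclass name is hypothetical, not part of the patch):

  import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;

  // Hypothetical subclass, shown only to illustrate the effect of the change:
  // with the patched handler, AuthenticationTokens are created via getType(),
  // so they carry "my-kerberos" instead of the fixed TYPE constant.
  public class CustomKerberosHandler extends KerberosAuthenticationHandler {
    @Override
    public String getType() {
      return "my-kerberos";
    }
  }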

+ 1 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java

@@ -126,7 +126,7 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
         throw new AuthenticationException("Anonymous requests are disallowed");
       }
     } else {
-      token = new AuthenticationToken(userName, userName, TYPE);
+      token = new AuthenticationToken(userName, userName, getType());
     }
     return token;
   }

+ 66 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -63,6 +63,10 @@ Trunk (unreleased changes)
 
     HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh)
 
+    HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
+
+    HADOOP-8308. Support cross-project Jenkins builds. (tomwhite)
+
   BUG FIXES
 
     HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
@@ -119,6 +123,12 @@ Trunk (unreleased changes)
 
     HADOOP-7788. Add simple HealthMonitor class to watch an HAService (todd)
 
+    HADOOP-8312. testpatch.sh should provide a simpler way to see which
+    warnings changed (bobby)
+
+    HADOOP-8339. jenkins complaining about 16 javadoc warnings 
+    (Tom White and Robert Evans via tgraves)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -265,6 +275,15 @@ Release 2.0.0 - UNRELEASED
 
     HADOOP-8117. Upgrade test build to Surefire 2.12 (todd)
 
+    HADOOP-8152. Expand public APIs for security library classes. (atm via eli)
+
+    HADOOP-7549. Use JDK ServiceLoader mechanism to find FileSystem implementations. (tucu)
+
+    HADOOP-8185. Update namenode -format documentation and add -nonInteractive
+    and -force. (Arpit Gupta via atm)
+
+    HADOOP-8214. make hadoop script recognize a full set of deprecated commands (rvs via tucu)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -360,6 +379,33 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8282. start-all.sh refers incorrectly start-dfs.sh
     existence for starting start-yarn.sh. (Devaraj K via eli)
 
+    HADOOP-7350. Use ServiceLoader to discover compression codec classes.
+    (tomwhite)
+
+    HADOOP-8284. clover integration broken, also mapreduce poms are pulling
+    in clover as a dependency. (phunt via tucu)
+
+    HADOOP-8309. Pseudo & Kerberos AuthenticationHandler should use 
+    getType() to create token (tucu)
+
+    HADOOP-8314. HttpServer#hasAdminAccess should return false if 
+    authorization is enabled but user is not authenticated. (tucu)
+
+    HADOOP-8296. hadoop/yarn daemonlog usage wrong (Devaraj K via tgraves)
+
+    HADOOP-8310. FileContext#checkPath should handle URIs with no port. (atm)
+
+    HADOOP-8321. TestUrlStreamHandler fails. (tucu)
+
+    HADOOP-8325. Add a ShutdownHookManager to be used by different
+    components instead of the JVM shutdownhook (tucu)
+
+    HADOOP-8275. Range check DelegationKey length.
+    (Colin Patrick McCabe via eli)
+
+    HADOOP-8342. HDFS command fails with exception following merge of 
+    HADOOP-8325 (tucu)
+
   BREAKDOWN OF HADOOP-7454 SUBTASKS
 
     HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
@@ -412,6 +458,12 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8116. RetriableCommand is using RetryPolicy incorrectly after
     HADOOP-7896. (atm)
 
+    HADOOP-8317. Update maven-assembly-plugin to 2.3 - fix build on FreeBSD
+    (Radim Kolar via bobby)
+
+    HADOOP-8172. Configuration no longer sets all keys in a deprecated key 
+    list. (Anupam Seth via bobby)
+
 Release 0.23.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -423,6 +475,9 @@ Release 0.23.3 - UNRELEASED
     HADOOP-8108. Move method getHostPortString() from NameNode to NetUtils.
     (Brandon Li via jitendra)
 
+    HADOOP-8288. Remove references of mapred.child.ulimit etc. since they are
+    not being used any more (Ravi Prakash via bobby)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -453,6 +508,17 @@ Release 0.23.3 - UNRELEASED
 
     HADOOP-8227. Allow RPC to limit ephemeral port range. (bobby)
 
+    HADOOP-8305. distcp over viewfs is broken (John George via bobby)
+
+    HADOOP-8334. HttpServer sometimes returns incorrect port (Daryn Sharp via
+    bobby)
+
+    HADOOP-8330. Update TestSequenceFile.testCreateUsesFsArg() for HADOOP-8305.
+    (John George via szetszwo)
+
+    HADOOP-8335. Improve Configuration's address handling (Daryn Sharp via
+    bobby)
+
 Release 0.23.2 - UNRELEASED 
 
   INCOMPATIBLE CHANGES

+ 5 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -281,9 +281,14 @@
     <Match>
       <!-- protobuf generated code -->
       <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.IpcConnectionContextProtos.*"/>
+    </Match>
+        <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.RpcPayloadHeaderProtos.*"/>
     </Match>
     <Match>
       <!-- protobuf generated code -->
       <Class name="~org\.apache\.hadoop\.ha\.proto\.HAServiceProtocolProtos.*"/>
     </Match>
+
  </FindBugsFilter>

+ 8 - 6
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -50,15 +50,16 @@ fi
 COMMAND=$1
 case $COMMAND in
   #hdfs commands
-  namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer|fetchdt)
+  namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer|fetchdt|oiv|dfsgroups)
     echo "DEPRECATED: Use of this script to execute hdfs command is deprecated." 1>&2
     echo "Instead use the hdfs command for it." 1>&2
     echo "" 1>&2
     #try to locate hdfs and if present, delegate to it.  
+    shift
     if [ -f "${HADOOP_HDFS_HOME}"/bin/hdfs ]; then
-      exec "${HADOOP_HDFS_HOME}"/bin/hdfs $*
+      exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups}  $*
     elif [ -f "${HADOOP_PREFIX}"/bin/hdfs ]; then
-      exec "${HADOOP_PREFIX}"/bin/hdfs $*
+      exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} $*
     else
       echo "HADOOP_HDFS_HOME not found!"
       exit 1
@@ -66,15 +67,16 @@ case $COMMAND in
     ;;
 
   #mapred commands for backwards compatibility
-  pipes|job|queue)
+  pipes|job|queue|mrgroups|mradmin|jobtracker|tasktracker)
    echo "DEPRECATED: Use of this script to execute mapred command is deprecated." 1>&2
     echo "Instead use the mapred command for it." 1>&2
     echo "" 1>&2
     #try to locate mapred and if present, delegate to it.
+    shift
     if [ -f "${HADOOP_MAPRED_HOME}"/bin/mapred ]; then
-      exec "${HADOOP_MAPRED_HOME}"/bin/mapred $*
+      exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} $*
     elif [ -f "${HADOOP_PREFIX}"/bin/mapred ]; then
-      exec "${HADOOP_PREFIX}"/bin/mapred $*
+      exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} $*
     else
       echo "HADOOP_MAPRED_HOME not found!"
       exit 1

+ 6 - 3
hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/commands_manual.xml

@@ -696,7 +696,7 @@
 					<a href="http://hadoop.apache.org/hdfs/docs/current/hdfs_user_guide.html#Upgrade+and+Rollback">Upgrade and Rollback</a>.
 				</p>
 				<p>
-					<code>Usage: hadoop namenode [-format] | [-upgrade] | [-rollback] | [-finalize] | [-importCheckpoint] | [-checkpoint] | [-backup]</code>
+					<code>Usage: hadoop namenode [-format [-force] [-nonInteractive] [-clusterid someid]] | [-upgrade] | [-rollback] | [-finalize] | [-importCheckpoint] | [-checkpoint] | [-backup]</code>
 				</p>
 				<table>
 			          <tr><th> COMMAND_OPTION </th><th> Description </th></tr>
@@ -714,8 +714,11 @@
                   <td>Start namenode in backup role, maintaining an up-to-date in-memory copy of the namespace and creating periodic checkpoints.</td>
                 </tr>
 			           <tr>
-			          	<td><code>-format</code></td>
-			            <td>Formats the namenode. It starts the namenode, formats it and then shut it down.</td>
+			          	<td><code>-format [-force] [-nonInteractive] [-clusterid someid]</code></td>
+			            <td>Formats the namenode. It starts the namenode, formats it and then shuts it down. User will be prompted before formatting any non empty name directories in the local filesystem.<br/>
+                                    -nonInteractive: User will not be prompted for input if non empty name directories exist in the local filesystem and the format will fail.<br/>
+                                    -force: Formats the namenode and the user will NOT be prompted to confirm formatting of the name directories in the local filesystem. If -nonInteractive option is specified it will be ignored.<br/>
+                                    -clusterid: Associates the namenode with the id specified. When formatting federated namenodes use this option to make sure all namenodes are associated with the same id.</td>
 			           </tr>
 			           <tr>
 			          	<td><code>-upgrade</code></td>

+ 132 - 32
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -33,6 +33,7 @@ import java.io.Writer;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 import java.net.URL;
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Collections;
 import java.util.Enumeration;
 import java.util.Enumeration;
@@ -269,10 +270,18 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * This is to be used only by the developers in order to add deprecation of
    * This is to be used only by the developers in order to add deprecation of
    * keys, and attempts to call this method after loading resources once,
    * keys, and attempts to call this method after loading resources once,
    * would lead to <tt>UnsupportedOperationException</tt>
    * would lead to <tt>UnsupportedOperationException</tt>
+   * 
+   * If a key is deprecated in favor of multiple keys, they are all treated as 
+   * aliases of each other, and setting any one of them resets all the others 
+   * to the new value.
+   * 
    * @param key
    * @param key
    * @param newKeys
    * @param newKeys
    * @param customMessage
    * @param customMessage
+   * @deprecated use {@link addDeprecation(String key, String newKey,
+      String customMessage)} instead
    */
    */
+  @Deprecated
   public synchronized static void addDeprecation(String key, String[] newKeys,
   public synchronized static void addDeprecation(String key, String[] newKeys,
       String customMessage) {
       String customMessage) {
     if (key == null || key.length() == 0 ||
     if (key == null || key.length() == 0 ||
@@ -288,6 +297,22 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       }
       }
     }
     }
   }
   }
+  
+  /**
+   * Adds the deprecated key to the deprecation map.
+   * It does not override any existing entries in the deprecation map.
+   * This is to be used only by the developers in order to add deprecation of
+   * keys, and attempts to call this method after loading resources once,
+   * would lead to <tt>UnsupportedOperationException</tt>
+   * 
+   * @param key
+   * @param newKey
+   * @param customMessage
+   */
+  public synchronized static void addDeprecation(String key, String newKey,
+	      String customMessage) {
+	  addDeprecation(key, new String[] {newKey}, customMessage);
+  }
 
 
   /**
   /**
    * Adds the deprecated key to the deprecation map when no custom message
    * Adds the deprecated key to the deprecation map when no custom message
@@ -297,13 +322,34 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * keys, and attempts to call this method after loading resources once,
    * keys, and attempts to call this method after loading resources once,
    * would lead to <tt>UnsupportedOperationException</tt>
    * would lead to <tt>UnsupportedOperationException</tt>
    * 
    * 
+   * If a key is deprecated in favor of multiple keys, they are all treated as 
+   * aliases of each other, and setting any one of them resets all the others 
+   * to the new value.
+   * 
    * @param key Key that is to be deprecated
    * @param key Key that is to be deprecated
    * @param newKeys list of keys that take up the values of deprecated key
    * @param newKeys list of keys that take up the values of deprecated key
+   * @deprecated use {@link addDeprecation(String key, String newKey)} instead
    */
    */
+  @Deprecated
   public synchronized static void addDeprecation(String key, String[] newKeys) {
   public synchronized static void addDeprecation(String key, String[] newKeys) {
     addDeprecation(key, newKeys, null);
     addDeprecation(key, newKeys, null);
   }
   }
   
   
+  /**
+   * Adds the deprecated key to the deprecation map when no custom message
+   * is provided.
+   * It does not override any existing entries in the deprecation map.
+   * This is to be used only by the developers in order to add deprecation of
+   * keys, and attempts to call this method after loading resources once,
+   * would lead to <tt>UnsupportedOperationException</tt>
+   * 
+   * @param key Key that is to be deprecated
+   * @param newKey key that takes up the value of deprecated key
+   */
+  public synchronized static void addDeprecation(String key, String newKey) {
+	addDeprecation(key, new String[] {newKey}, null);
+  }
+  
   /**
   /**
    * checks whether the given <code>key</code> is deprecated.
    * checks whether the given <code>key</code> is deprecated.
    * 
    * 
@@ -322,16 +368,26 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @param name property name.
    * @param name property name.
    * @return alternate name.
    * @return alternate name.
    */
    */
-  private String getAlternateName(String name) {
-    String altName;
+  private String[] getAlternateNames(String name) {
+    String oldName, altNames[] = null;
     DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
     DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
-    if (keyInfo != null) {
-      altName = (keyInfo.newKeys.length > 0) ? keyInfo.newKeys[0] : null;
-    }
-    else {
-      altName = reverseDeprecatedKeyMap.get(name);
+    if (keyInfo == null) {
+      altNames = (reverseDeprecatedKeyMap.get(name) != null ) ? 
+        new String [] {reverseDeprecatedKeyMap.get(name)} : null;
+      if(altNames != null && altNames.length > 0) {
+    	//To help look for other new configs for this deprecated config
+    	keyInfo = deprecatedKeyMap.get(altNames[0]);
+      }      
+    } 
+    if(keyInfo != null && keyInfo.newKeys.length > 0) {
+      List<String> list = new ArrayList<String>(); 
+      if(altNames != null) {
+    	  list.addAll(Arrays.asList(altNames));
+      }
+      list.addAll(Arrays.asList(keyInfo.newKeys));
+      altNames = list.toArray(new String[list.size()]);
     }
     }
-    return altName;
+    return altNames;
   }
   }
 
 
   /**
   /**
@@ -346,24 +402,29 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @return the first property in the list of properties mapping
    * @return the first property in the list of properties mapping
    *         the <code>name</code> or the <code>name</code> itself.
    *         the <code>name</code> or the <code>name</code> itself.
    */
    */
-  private String handleDeprecation(String name) {
-    if (isDeprecated(name)) {
+  private String[] handleDeprecation(String name) {
+    ArrayList<String > names = new ArrayList<String>();
+	if (isDeprecated(name)) {
       DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
       DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
       warnOnceIfDeprecated(name);
       warnOnceIfDeprecated(name);
       for (String newKey : keyInfo.newKeys) {
       for (String newKey : keyInfo.newKeys) {
         if(newKey != null) {
         if(newKey != null) {
-          name = newKey;
-          break;
+          names.add(newKey);
         }
         }
       }
       }
     }
     }
-    String deprecatedKey = reverseDeprecatedKeyMap.get(name);
-    if (deprecatedKey != null && !getOverlay().containsKey(name) &&
-        getOverlay().containsKey(deprecatedKey)) {
-      getProps().setProperty(name, getOverlay().getProperty(deprecatedKey));
-      getOverlay().setProperty(name, getOverlay().getProperty(deprecatedKey));
+    if(names.size() == 0) {
+    	names.add(name);
     }
     }
-    return name;
+    for(String n : names) {
+	  String deprecatedKey = reverseDeprecatedKeyMap.get(n);
+	  if (deprecatedKey != null && !getOverlay().containsKey(n) &&
+	      getOverlay().containsKey(deprecatedKey)) {
+	    getProps().setProperty(n, getOverlay().getProperty(deprecatedKey));
+	    getOverlay().setProperty(n, getOverlay().getProperty(deprecatedKey));
+	  }
+    }
+    return names.toArray(new String[names.size()]);
   }
   }
  
  
   private void handleDeprecation() {
   private void handleDeprecation() {
@@ -595,8 +656,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *         or null if no such property exists.
    *         or null if no such property exists.
    */
    */
   public String get(String name) {
   public String get(String name) {
-    name = handleDeprecation(name);
-    return substituteVars(getProps().getProperty(name));
+    String[] names = handleDeprecation(name);
+    String result = null;
+    for(String n : names) {
+      result = substituteVars(getProps().getProperty(n));
+    }
+    return result;
   }
   }
   
   
   /**
   /**
@@ -633,8 +698,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *         its replacing property and null if no such property exists.
    *         its replacing property and null if no such property exists.
    */
    */
   public String getRaw(String name) {
   public String getRaw(String name) {
-    name = handleDeprecation(name);
-    return getProps().getProperty(name);
+    String[] names = handleDeprecation(name);
+    String result = null;
+    for(String n : names) {
+      result = getProps().getProperty(n);
+    }
+    return result;
   }
   }
 
 
   /** 
   /** 
@@ -652,10 +721,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     getOverlay().setProperty(name, value);
     getOverlay().setProperty(name, value);
     getProps().setProperty(name, value);
     getProps().setProperty(name, value);
     updatingResource.put(name, UNKNOWN_RESOURCE);
     updatingResource.put(name, UNKNOWN_RESOURCE);
-    String altName = getAlternateName(name);
-    if (altName != null) {
-      getOverlay().setProperty(altName, value);
-      getProps().setProperty(altName, value);
+    String[] altNames = getAlternateNames(name);
+    if (altNames != null && altNames.length > 0) {
+      for(String altName : altNames) {
+    	getOverlay().setProperty(altName, value);
+        getProps().setProperty(altName, value);
+      }
     }
     }
     warnOnceIfDeprecated(name);
     warnOnceIfDeprecated(name);
   }
   }
@@ -671,12 +742,14 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * Unset a previously set property.
    * Unset a previously set property.
    */
    */
   public synchronized void unset(String name) {
   public synchronized void unset(String name) {
-    String altName = getAlternateName(name);
+    String[] altNames = getAlternateNames(name);
     getOverlay().remove(name);
     getOverlay().remove(name);
     getProps().remove(name);
     getProps().remove(name);
-    if (altName !=null) {
-      getOverlay().remove(altName);
-       getProps().remove(altName);
+    if (altNames !=null && altNames.length > 0) {
+      for(String altName : altNames) {
+    	getOverlay().remove(altName);
+    	getProps().remove(altName);
+      }
     }
     }
   }
   }
 
 
@@ -711,8 +784,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *         doesn't exist.                    
    *         doesn't exist.                    
    */
    */
   public String get(String name, String defaultValue) {
   public String get(String name, String defaultValue) {
-    name = handleDeprecation(name);
-    return substituteVars(getProps().getProperty(name, defaultValue));
+    String[] names = handleDeprecation(name);
+    String result = null;
+    for(String n : names) {
+      result = substituteVars(getProps().getProperty(n, defaultValue));
+    }
+    return result;
   }
   }
     
     
   /** 
   /** 
@@ -1236,6 +1313,29 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     final String address = get(name, defaultAddress);
     final String address = get(name, defaultAddress);
     return NetUtils.createSocketAddr(address, defaultPort, name);
     return NetUtils.createSocketAddr(address, defaultPort, name);
   }
   }
+
+  /**
+   * Set the socket address for the <code>name</code> property as
+   * a <code>host:port</code>.
+   */
+  public void setSocketAddr(String name, InetSocketAddress addr) {
+    set(name, NetUtils.getHostPortString(addr));
+  }
+  
+  /**
+   * Set the socket address a client can use to connect for the
+   * <code>name</code> property as a <code>host:port</code>.  The wildcard
+   * address is replaced with the local host's address.
+   * @param name property name.
+   * @param addr InetSocketAddress of a listener to store in the given property
+   * @return InetSocketAddress for clients to connect
+   */
+  public InetSocketAddress updateConnectAddr(String name,
+                                             InetSocketAddress addr) {
+    final InetSocketAddress connectAddr = NetUtils.getConnectAddress(addr);
+    setSocketAddr(name, connectAddr);
+    return connectAddr;
+  }
 
   /**
    * Load a class by name.

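For reference, a minimal sketch (not part of this patch) of what the fan-out to multiple alternate names means in practice. The key names are invented for illustration, and it assumes the existing static Configuration.addDeprecation(String, String[]) registration:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;

    public class DeprecationFanOutSketch {
      public static void main(String[] args) {
        // Hypothetical keys: one deprecated name now maps to two new names.
        Configuration.addDeprecation("old.service.address",
            new String[] {"new.service.rpc-address", "new.service.http-address"});

        Configuration conf = new Configuration(false);
        conf.set("old.service.address", "host1:8020");

        // set() above also wrote every alternate name, so both resolve.
        System.out.println(conf.get("new.service.rpc-address"));   // host1:8020
        System.out.println(conf.get("new.service.http-address"));  // host1:8020

        // The new helper stores a socket address as host:port under a property.
        conf.setSocketAddr("new.service.rpc-address",
            new InetSocketAddress("host1", 8020));
      }
    }
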
+ 9 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -350,20 +350,23 @@ public abstract class AbstractFileSystem {
       }
     }
     String thisScheme = this.getUri().getScheme();
-    String thisAuthority = this.getUri().getAuthority();
+    String thisHost = this.getUri().getHost();
+    String thatHost = uri.getHost();
     
     
-    // Schemes and authorities must match.
+    // Schemes and hosts must match.
     // Allow for null Authority for file:///
     if (!thisScheme.equalsIgnoreCase(thatScheme) ||
-       (thisAuthority != null && 
-            !thisAuthority.equalsIgnoreCase(thatAuthority)) ||
-       (thisAuthority == null && thatAuthority != null)) {
+       (thisHost != null && 
+            !thisHost.equalsIgnoreCase(thatHost)) ||
+       (thisHost == null && thatHost != null)) {
       throw new InvalidPathException("Wrong FS: " + path + ", expected: "
           + this.getUri());
     }
 
+    // Ports must match, unless this FS instance is using the default port, in
+    // which case the port may be omitted from the given URI
     int thisPort = this.getUri().getPort();
-    int thatPort = path.toUri().getPort();
+    int thatPort = uri.getPort();
     if (thatPort == -1) { // -1 => defaultPort of Uri scheme
       thatPort = this.getUriDefaultPort();
     }

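As an illustration, a rough restatement of the rule this hunk is aiming at, not the actual method body: schemes and hosts must match, and a URI that omits its port is treated as using the filesystem's default port. The helper and example values below are invented, and it assumes both sides fall back to the same default:

    import java.net.URI;

    public class CheckPathSketch {
      // Illustrative predicate; the real check lives in AbstractFileSystem.checkPath().
      static boolean sameFileSystem(URI fsUri, URI pathUri, int defaultPort) {
        if (!fsUri.getScheme().equalsIgnoreCase(pathUri.getScheme())) {
          return false;                                  // schemes must match
        }
        String thisHost = fsUri.getHost();
        String thatHost = pathUri.getHost();
        boolean hostsMatch = (thisHost == null)
            ? thatHost == null                           // e.g. file:///
            : thisHost.equalsIgnoreCase(thatHost);
        if (!hostsMatch) {
          return false;
        }
        int thisPort = fsUri.getPort()  == -1 ? defaultPort : fsUri.getPort();
        int thatPort = pathUri.getPort() == -1 ? defaultPort : pathUri.getPort();
        return thisPort == thatPort;                     // missing port => default
      }

      public static void main(String[] args) {
        URI fs = URI.create("hdfs://nn.example.com:8020");
        URI path = URI.create("hdfs://nn.example.com/user/alice"); // port omitted
        System.out.println(sameFileSystem(fs, path, 8020));        // true
      }
    }
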
+ 10 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.ShutdownHookManager;
 
 
 /**
 /**
  * The FileContext class provides an interface to the application writer for
  * The FileContext class provides an interface to the application writer for
@@ -171,7 +172,12 @@ public final class FileContext {
   
   
   public static final Log LOG = LogFactory.getLog(FileContext.class);
   public static final Log LOG = LogFactory.getLog(FileContext.class);
   public static final FsPermission DEFAULT_PERM = FsPermission.getDefault();
   public static final FsPermission DEFAULT_PERM = FsPermission.getDefault();
-  
+
+  /**
+   * Priority of the FileContext shutdown hook.
+   */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 20;
+
   /**
   /**
    * List of files that should be deleted on JVM shutdown.
    * List of files that should be deleted on JVM shutdown.
    */
    */
@@ -1456,8 +1462,8 @@ public final class FileContext {
       return false;
       return false;
     }
     }
     synchronized (DELETE_ON_EXIT) {
     synchronized (DELETE_ON_EXIT) {
-      if (DELETE_ON_EXIT.isEmpty() && !FINALIZER.isAlive()) {
-        Runtime.getRuntime().addShutdownHook(FINALIZER);
+      if (DELETE_ON_EXIT.isEmpty()) {
+        ShutdownHookManager.get().addShutdownHook(FINALIZER, SHUTDOWN_HOOK_PRIORITY);
       }
       }
       
       
       Set<Path> set = DELETE_ON_EXIT.get(this);
       Set<Path> set = DELETE_ON_EXIT.get(this);
@@ -2215,7 +2221,7 @@ public final class FileContext {
   /**
   /**
    * Deletes all the paths in deleteOnExit on JVM shutdown.
    * Deletes all the paths in deleteOnExit on JVM shutdown.
    */
    */
-  static class FileContextFinalizer extends Thread {
+  static class FileContextFinalizer implements Runnable {
     public synchronized void run() {
     public synchronized void run() {
       processDeleteOnExit();
       processDeleteOnExit();
     }
     }

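A minimal sketch (not part of this patch) of the new hook registration style: finalizers are plain Runnables handed to ShutdownHookManager with a priority, and higher priorities run earlier, so with the constants above FileContext's finalizer (20) is intended to run before FileSystem's (10). The priority value below is made up for illustration:

    import org.apache.hadoop.util.ShutdownHookManager;

    public class ShutdownHookSketch {
      public static void main(String[] args) {
        // A Runnable instead of a raw Thread; ShutdownHookManager drives all
        // registered hooks from a single JVM shutdown hook, highest priority first.
        ShutdownHookManager.get().addShutdownHook(new Runnable() {
          @Override
          public void run() {
            System.out.println("runs before the FileSystem/FileContext cleanup");
          }
        }, 30);  // illustrative priority, higher than both built-in hooks
      }
    }
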
+ 58 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -32,6 +32,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.List;
 import java.util.Map;
 import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.NoSuchElementException;
+import java.util.ServiceLoader;
 import java.util.Set;
 import java.util.Set;
 import java.util.Stack;
 import java.util.Stack;
 import java.util.TreeSet;
 import java.util.TreeSet;
@@ -54,6 +55,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.ShutdownHookManager;
 
 
 /****************************************************************
 /****************************************************************
  * An abstract base class for a fairly generic filesystem.  It
  * An abstract base class for a fairly generic filesystem.  It
@@ -83,6 +85,11 @@ public abstract class FileSystem extends Configured implements Closeable {
 
 
   public static final Log LOG = LogFactory.getLog(FileSystem.class);
   public static final Log LOG = LogFactory.getLog(FileSystem.class);
 
 
+  /**
+   * Priority of the FileSystem shutdown hook.
+   */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 10;
+
   /** FileSystem cache */
   /** FileSystem cache */
   static final Cache CACHE = new Cache();
   static final Cache CACHE = new Cache();
 
 
@@ -184,6 +191,17 @@ public abstract class FileSystem extends Configured implements Closeable {
     statistics = getStatistics(name.getScheme(), getClass());    
     statistics = getStatistics(name.getScheme(), getClass());    
   }
   }
 
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   * This implementation throws an <code>UnsupportedOperationException</code>.
+   *
+   * @return the protocol scheme for the FileSystem.
+   */
+  public String getScheme() {
+    throw new UnsupportedOperationException("Not implemented by  the FileSystem implementation");
+  }
+
   /** Returns a URI whose scheme and authority identify this FileSystem.*/
   /** Returns a URI whose scheme and authority identify this FileSystem.*/
   public abstract URI getUri();
   public abstract URI getUri();
   
   
@@ -2078,9 +2096,45 @@ public abstract class FileSystem extends Configured implements Closeable {
       ) throws IOException {
       ) throws IOException {
   }
   }
 
 
+  // making it volatile to be able to do a double checked locking
+  private volatile static boolean FILE_SYSTEMS_LOADED = false;
+
+  private static final Map<String, Class<? extends FileSystem>>
+    SERVICE_FILE_SYSTEMS = new HashMap<String, Class<? extends FileSystem>>();
+
+  private static void loadFileSystems() {
+    synchronized (FileSystem.class) {
+      if (!FILE_SYSTEMS_LOADED) {
+        ServiceLoader<FileSystem> serviceLoader = ServiceLoader.load(FileSystem.class);
+        for (FileSystem fs : serviceLoader) {
+          SERVICE_FILE_SYSTEMS.put(fs.getScheme(), fs.getClass());
+        }
+        FILE_SYSTEMS_LOADED = true;
+      }
+    }
+  }
+
+  public static Class<? extends FileSystem> getFileSystemClass(String scheme,
+      Configuration conf) throws IOException {
+    if (!FILE_SYSTEMS_LOADED) {
+      loadFileSystems();
+    }
+    Class<? extends FileSystem> clazz = null;
+    if (conf != null) {
+      clazz = (Class<? extends FileSystem>) conf.getClass("fs." + scheme + ".impl", null);
+    }
+    if (clazz == null) {
+      clazz = SERVICE_FILE_SYSTEMS.get(scheme);
+    }
+    if (clazz == null) {
+      throw new IOException("No FileSystem for scheme: " + scheme);
+    }
+    return clazz;
+  }
+
   private static FileSystem createFileSystem(URI uri, Configuration conf
   private static FileSystem createFileSystem(URI uri, Configuration conf
       ) throws IOException {
       ) throws IOException {
-    Class<?> clazz = conf.getClass("fs." + uri.getScheme() + ".impl", null);
+    Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
     if (clazz == null) {
     if (clazz == null) {
       throw new IOException("No FileSystem for scheme: " + uri.getScheme());
       throw new IOException("No FileSystem for scheme: " + uri.getScheme());
     }
     }
@@ -2128,8 +2182,8 @@ public abstract class FileSystem extends Configured implements Closeable {
         }
         }
         
         
         // now insert the new file system into the map
         // now insert the new file system into the map
-        if (map.isEmpty() && !clientFinalizer.isAlive()) {
-          Runtime.getRuntime().addShutdownHook(clientFinalizer);
+        if (map.isEmpty() ) {
+          ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY);
         }
         }
         fs.key = key;
         fs.key = key;
         map.put(key, fs);
         map.put(key, fs);
@@ -2144,13 +2198,7 @@ public abstract class FileSystem extends Configured implements Closeable {
       if (map.containsKey(key) && fs == map.get(key)) {
       if (map.containsKey(key) && fs == map.get(key)) {
         map.remove(key);
         map.remove(key);
         toAutoClose.remove(key);
         toAutoClose.remove(key);
-        if (map.isEmpty() && !clientFinalizer.isAlive()) {
-          if (!Runtime.getRuntime().removeShutdownHook(clientFinalizer)) {
-            LOG.info("Could not cancel cleanup thread, though no " +
-                     "FileSystems are open");
-          }
         }
         }
-      }
     }
     }
 
 
     synchronized void closeAll() throws IOException {
     synchronized void closeAll() throws IOException {
@@ -2194,7 +2242,7 @@ public abstract class FileSystem extends Configured implements Closeable {
       }
       }
     }
     }
 
 
-    private class ClientFinalizer extends Thread {
+    private class ClientFinalizer implements Runnable {
       public synchronized void run() {
       public synchronized void run() {
         try {
         try {
           closeAll(true);
           closeAll(true);

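A small usage sketch of the resolution order introduced here: an explicit fs.<scheme>.impl entry in the configuration still wins, otherwise the class registered through META-INF/services/org.apache.hadoop.fs.FileSystem (keyed by getScheme()) is used:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class FsResolutionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // 1) conf.getClass("fs.<scheme>.impl", null) is consulted first, so an
        //    explicit override keeps working exactly as before.
        // 2) Otherwise the ServiceLoader registry populated by loadFileSystems()
        //    supplies the class for the scheme.
        Class<? extends FileSystem> clazz =
            FileSystem.getFileSystemClass("file", conf);
        System.out.println(clazz.getName());  // LocalFileSystem, via its getScheme()
      }
    }
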
+ 9 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java

@@ -17,6 +17,7 @@
  */
  */
 package org.apache.hadoop.fs;
 package org.apache.hadoop.fs;
 
 
+import java.io.IOException;
 import java.net.URLStreamHandlerFactory;
 import java.net.URLStreamHandlerFactory;
 import java.util.HashMap;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map;
@@ -50,25 +51,23 @@ public class FsUrlStreamHandlerFactory implements
   private java.net.URLStreamHandler handler;
   private java.net.URLStreamHandler handler;
 
 
   public FsUrlStreamHandlerFactory() {
   public FsUrlStreamHandlerFactory() {
-    this.conf = new Configuration();
-    // force the resolution of the configuration files
-    // this is required if we want the factory to be able to handle
-    // file:// URLs
-    this.conf.getClass("fs.file.impl", null);
-    this.handler = new FsUrlStreamHandler(this.conf);
+    this(new Configuration());
   }
   }
 
 
   public FsUrlStreamHandlerFactory(Configuration conf) {
   public FsUrlStreamHandlerFactory(Configuration conf) {
     this.conf = new Configuration(conf);
     this.conf = new Configuration(conf);
-    // force the resolution of the configuration files
-    this.conf.getClass("fs.file.impl", null);
     this.handler = new FsUrlStreamHandler(this.conf);
     this.handler = new FsUrlStreamHandler(this.conf);
   }
   }
 
 
   public java.net.URLStreamHandler createURLStreamHandler(String protocol) {
   public java.net.URLStreamHandler createURLStreamHandler(String protocol) {
     if (!protocols.containsKey(protocol)) {
     if (!protocols.containsKey(protocol)) {
-      boolean known =
-          (conf.getClass("fs." + protocol + ".impl", null) != null);
+      boolean known = true;
+      try {
+        FileSystem.getFileSystemClass(protocol, conf);
+      }
+      catch (IOException ex) {
+        known = false;
+      }
       protocols.put(protocol, known);
       protocols.put(protocol, known);
     }
     }
     if (protocols.get(protocol)) {
     if (protocols.get(protocol)) {

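For reference, a minimal sketch (not part of this patch) of how the factory is typically wired into java.net.URL; with getFileSystemClass() behind it, unknown protocols simply fall through to the JVM's default handlers instead of requiring an fs.<scheme>.impl entry:

    import java.io.InputStream;
    import java.net.URL;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
    import org.apache.hadoop.io.IOUtils;

    public class UrlHandlerSketch {
      public static void main(String[] args) throws Exception {
        // May be called at most once per JVM.
        URL.setURLStreamHandlerFactory(
            new FsUrlStreamHandlerFactory(new Configuration()));

        // file:// is resolved through Hadoop's handler; http:// etc. still use
        // the built-in JDK handlers because createURLStreamHandler returns null.
        InputStream in = new URL("file:///etc/hosts").openStream();
        IOUtils.copyBytes(in, System.out, 4096, true);
      }
    }
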
+ 12 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -71,7 +71,18 @@ public class HarFileSystem extends FilterFileSystem {
    */
    */
   public HarFileSystem() {
   public HarFileSystem() {
   }
   }
-  
+
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>har</code>
+   */
+  @Override
+  public String getScheme() {
+    return "har";
+  }
+
   /**
   /**
    * Constructor to create a HarFileSystem with an
    * Constructor to create a HarFileSystem with an
    * underlying filesystem.
    * underlying filesystem.

+ 12 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -39,7 +39,18 @@ public class LocalFileSystem extends ChecksumFileSystem {
   public LocalFileSystem() {
   public LocalFileSystem() {
     this(new RawLocalFileSystem());
     this(new RawLocalFileSystem());
   }
   }
-  
+
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>file</code>
+   */
+  @Override
+  public String getScheme() {
+    return "file";
+  }
+
   public FileSystem getRaw() {
   public FileSystem getRaw() {
     return getRawFileSystem();
     return getRawFileSystem();
   }
   }

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java

@@ -59,6 +59,17 @@ public class FTPFileSystem extends FileSystem {
 
 
   private URI uri;
   private URI uri;
 
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>ftp</code>
+   */
+  @Override
+  public String getScheme() {
+    return "ftp";
+  }
+
   @Override
   @Override
   public void initialize(URI uri, Configuration conf) throws IOException { // get
   public void initialize(URI uri, Configuration conf) throws IOException { // get
     super.initialize(uri, conf);
     super.initialize(uri, conf);

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KosmosFileSystem.java

@@ -57,6 +57,17 @@ public class KosmosFileSystem extends FileSystem {
         this.kfsImpl = fsimpl;
         this.kfsImpl = fsimpl;
     }
     }
 
 
+    /**
+     * Return the protocol scheme for the FileSystem.
+     * <p/>
+     *
+     * @return <code>kfs</code>
+     */
+    @Override
+    public String getScheme() {
+      return "kfs";
+    }
+
     @Override
     @Override
     public URI getUri() {
     public URI getUri() {
 	return uri;
 	return uri;

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java

@@ -67,6 +67,17 @@ public class S3FileSystem extends FileSystem {
     this.store = store;
     this.store = store;
   }
   }
 
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>s3</code>
+   */
+  @Override
+  public String getScheme() {
+    return "s3";
+  }
+
   @Override
   @Override
   public URI getUri() {
   public URI getUri() {
     return uri;
     return uri;

+ 12 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java

@@ -251,7 +251,18 @@ public class NativeS3FileSystem extends FileSystem {
   public NativeS3FileSystem(NativeFileSystemStore store) {
   public NativeS3FileSystem(NativeFileSystemStore store) {
     this.store = store;
     this.store = store;
   }
   }
-  
+
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>s3n</code>
+   */
+  @Override
+  public String getScheme() {
+    return "s3n";
+  }
+
   @Override
   @Override
   public void initialize(URI uri, Configuration conf) throws IOException {
   public void initialize(URI uri, Configuration conf) throws IOException {
     super.initialize(uri, conf);
     super.initialize(uri, conf);

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -149,6 +149,17 @@ public class ViewFileSystem extends FileSystem {
     creationTime = System.currentTimeMillis();
     creationTime = System.currentTimeMillis();
   }
   }
 
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>viewfs</code>
+   */
+  @Override
+  public String getScheme() {
+    return "viewfs";
+  }
+
   /**
   /**
    * Called after a new FileSystem instance is constructed.
    * Called after a new FileSystem instance is constructed.
    * @param theUri a uri whose authority section names the host, port, etc. for
    * @param theUri a uri whose authority section names the host, port, etc. for

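The getScheme() overrides added to the bundled filesystems above exist so the ServiceLoader registry can key each implementation by scheme. As a hypothetical example (class and scheme names invented here), a third-party filesystem would do the same and ship a provider-configuration file:

    // Classpath resource META-INF/services/org.apache.hadoop.fs.FileSystem
    // containing the single line:
    //   com.example.fs.DemoFileSystem
    package com.example.fs;

    import org.apache.hadoop.fs.RawLocalFileSystem;

    public class DemoFileSystem extends RawLocalFileSystem {
      @Override
      public String getScheme() {
        return "demo";   // URIs look like demo:///path, resolved via the registry
      }
    }
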
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FenceMethod.java

@@ -52,7 +52,7 @@ public interface FenceMethod {
   
   
   /**
   /**
    * Attempt to fence the target node.
    * Attempt to fence the target node.
-   * @param serviceAddr the address (host:ipcport) of the service to fence
+   * @param target the target of the service to fence
    * @param args the configured arguments, which were checked at startup by
    * @param args the configured arguments, which were checked at startup by
    *             {@link #checkArgs(String)}
    *             {@link #checkArgs(String)}
    * @return true if fencing was successful, false if unsuccessful or
    * @return true if fencing was successful, false if unsuccessful or

+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java

@@ -144,12 +144,13 @@ public interface HAServiceProtocol {
   /**
   /**
    * Return the current status of the service. The status indicates
    * Return the current status of the service. The status indicates
    * the current <em>state</em> (e.g ACTIVE/STANDBY) as well as
    * the current <em>state</em> (e.g ACTIVE/STANDBY) as well as
-   * some additional information. {@see HAServiceStatus}
+   * some additional information.
    * 
    * 
    * @throws AccessControlException
    * @throws AccessControlException
    *           if access is denied.
    *           if access is denied.
    * @throws IOException
    * @throws IOException
    *           if other errors happen
    *           if other errors happen
+   * @see HAServiceStatus
    */
    */
   public HAServiceStatus getServiceStatus() throws AccessControlException,
   public HAServiceStatus getServiceStatus() throws AccessControlException,
                                                    IOException;
                                                    IOException;

+ 60 - 77
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -636,80 +636,16 @@ public class HttpServer implements FilterContainer {
    */
    */
   public void start() throws IOException {
   public void start() throws IOException {
     try {
     try {
-      if(listenerStartedExternally) { // Expect that listener was started securely
-        if(listener.getLocalPort() == -1) // ... and verify
-          throw new Exception("Exepected webserver's listener to be started " +
-             "previously but wasn't");
-        // And skip all the port rolling issues.
+      try {
+        openListener();
+        LOG.info("Jetty bound to port " + listener.getLocalPort());
         webServer.start();
         webServer.start();
-      } else {
-        int port = 0;
-        int oriPort = listener.getPort(); // The original requested port
-        while (true) {
-          try {
-            port = webServer.getConnectors()[0].getLocalPort();
-            LOG.debug("Port returned by webServer.getConnectors()[0]." +
-            		"getLocalPort() before open() is "+ port + 
-            		". Opening the listener on " + oriPort);
-            listener.open();
-            port = listener.getLocalPort();
-            LOG.debug("listener.getLocalPort() returned " + listener.getLocalPort() + 
-                  " webServer.getConnectors()[0].getLocalPort() returned " +
-                  webServer.getConnectors()[0].getLocalPort());
-            //Workaround to handle the problem reported in HADOOP-4744
-            if (port < 0) {
-              Thread.sleep(100);
-              int numRetries = 1;
-              while (port < 0) {
-                LOG.warn("listener.getLocalPort returned " + port);
-                if (numRetries++ > MAX_RETRIES) {
-                  throw new Exception(" listener.getLocalPort is returning " +
-                  		"less than 0 even after " +numRetries+" resets");
-                }
-                for (int i = 0; i < 2; i++) {
-                  LOG.info("Retrying listener.getLocalPort()");
-                  port = listener.getLocalPort();
-                  if (port > 0) {
-                    break;
-                  }
-                  Thread.sleep(200);
-                }
-                if (port > 0) {
-                  break;
-                }
-                LOG.info("Bouncing the listener");
-                listener.close();
-                Thread.sleep(1000);
-                listener.setPort(oriPort == 0 ? 0 : (oriPort += 1));
-                listener.open();
-                Thread.sleep(100);
-                port = listener.getLocalPort();
-              }
-            } //Workaround end
-            LOG.info("Jetty bound to port " + port);
-            webServer.start();
-            break;
-          } catch (IOException ex) {
-            // if this is a bind exception,
-            // then try the next port number.
-            if (ex instanceof BindException) {
-              if (!findPort) {
-                BindException be = new BindException(
-                        "Port in use: " + listener.getHost()
-                                + ":" + listener.getPort());
-                be.initCause(ex);
-                throw be;
-              }
-            } else {
-              LOG.info("HttpServer.start() threw a non Bind IOException"); 
-              throw ex;
-            }
-          } catch (MultiException ex) {
-            LOG.info("HttpServer.start() threw a MultiException"); 
-            throw ex;
-          }
-          listener.setPort((oriPort += 1));
-        }
+      } catch (IOException ex) {
+        LOG.info("HttpServer.start() threw a non Bind IOException", ex);
+        throw ex;
+      } catch (MultiException ex) {
+        LOG.info("HttpServer.start() threw a MultiException", ex);
+        throw ex;
       }
       }
       // Make sure there is no handler failures.
       // Make sure there is no handler failures.
       Handler[] handlers = webServer.getHandlers();
       Handler[] handlers = webServer.getHandlers();
@@ -729,6 +665,52 @@ public class HttpServer implements FilterContainer {
     }
     }
   }
   }
 
 
+  /**
+   * Open the main listener for the server
+   * @throws Exception
+   */
+  void openListener() throws Exception {
+    if (listener.getLocalPort() != -1) { // it's already bound
+      return;
+    }
+    if (listenerStartedExternally) { // Expect that listener was started securely
+      throw new Exception("Expected webserver's listener to be started " +
+          "previously but wasn't");
+    }
+    int port = listener.getPort();
+    while (true) {
+      // jetty has a bug where you can't reopen a listener that previously
+      // failed to open w/o issuing a close first, even if the port is changed
+      try {
+        listener.close();
+        listener.open();
+        break;
+      } catch (BindException ex) {
+        if (port == 0 || !findPort) {
+          BindException be = new BindException(
+              "Port in use: " + listener.getHost() + ":" + listener.getPort());
+          be.initCause(ex);
+          throw be;
+        }
+      }
+      // try the next port number
+      listener.setPort(++port);
+      Thread.sleep(100);
+    }
+  }
+  
+  /**
+   * Return the bind address of the listener.
+   * @return InetSocketAddress of the listener
+   */
+  public InetSocketAddress getListenerAddress() {
+    int port = listener.getLocalPort();
+    if (port == -1) { // not bound, return requested port
+      port = listener.getPort();
+    }
+    return new InetSocketAddress(listener.getHost(), port);
+  }
+  
   /**
   /**
    * stop the server
    * stop the server
    */
    */
@@ -821,7 +803,10 @@ public class HttpServer implements FilterContainer {
 
 
     String remoteUser = request.getRemoteUser();
     String remoteUser = request.getRemoteUser();
     if (remoteUser == null) {
     if (remoteUser == null) {
-      return true;
+      response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
+                         "Unauthenticated users are not " +
+                         "authorized to access this page.");
+      return false;
     }
     }
     AccessControlList adminsAcl = (AccessControlList) servletContext
     AccessControlList adminsAcl = (AccessControlList) servletContext
         .getAttribute(ADMINS_ACL);
         .getAttribute(ADMINS_ACL);
@@ -830,9 +815,7 @@ public class HttpServer implements FilterContainer {
     if (adminsAcl != null) {
     if (adminsAcl != null) {
       if (!adminsAcl.isUserAllowed(remoteUserUGI)) {
       if (!adminsAcl.isUserAllowed(remoteUserUGI)) {
         response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
         response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
-            + remoteUser + " is unauthorized to access this page. "
-            + "AccessControlList for accessing this page : "
-            + adminsAcl.toString());
+            + remoteUser + " is unauthorized to access this page.");
         return false;
         return false;
       }
       }
     }
     }

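A standalone sketch (plain ServerSocket rather than Jetty, names illustrative) of the bind-and-retry pattern that openListener() now implements, including the close-before-reopen workaround noted in the comment above:

    import java.io.IOException;
    import java.net.BindException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public class PortRollSketch {
      /** Bind the requested port, or walk upward from it when findPort is set. */
      static ServerSocket bind(int port, boolean findPort) throws IOException {
        while (true) {
          ServerSocket ss = new ServerSocket();
          try {
            ss.bind(new InetSocketAddress(port));
            return ss;                       // bound; caller asks getLocalPort()
          } catch (BindException ex) {
            ss.close();                      // mirror the close-before-reopen fix
            if (port == 0 || !findPort) {
              throw ex;                      // report "Port in use" immediately
            }
            port++;                          // otherwise try the next port number
          }
        }
      }
    }
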
+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -1050,9 +1050,9 @@ public class SequenceFile {
         int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
         int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
           bufferSizeOption.getValue();
           bufferSizeOption.getValue();
         short replication = replicationOption == null ? 
         short replication = replicationOption == null ? 
-          fs.getDefaultReplication() :
+          fs.getDefaultReplication(p) :
           (short) replicationOption.getValue();
           (short) replicationOption.getValue();
-        long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize() :
+        long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
           blockSizeOption.getValue();
           blockSizeOption.getValue();
         Progressable progress = progressOption == null ? null :
         Progressable progress = progressOption == null ? null :
           progressOption.getValue();
           progressOption.getValue();

+ 34 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java

@@ -326,9 +326,41 @@ public final class WritableUtils  {
    * @return deserialized integer from stream.
    * @return deserialized integer from stream.
    */
    */
   public static int readVInt(DataInput stream) throws IOException {
   public static int readVInt(DataInput stream) throws IOException {
-    return (int) readVLong(stream);
+    long n = readVLong(stream);
+    if ((n > Integer.MAX_VALUE) || (n < Integer.MIN_VALUE)) {
+      throw new IOException("value too long to fit in integer");
+    }
+    return (int)n;
+  }
+
+  /**
+   * Reads an integer from the input stream and returns it.
+   *
+   * This function validates that the integer is between [lower, upper],
+   * inclusive.
+   *
+   * @param stream Binary input stream
+   * @throws java.io.IOException
+   * @return deserialized integer from stream
+   */
+  public static int readVIntInRange(DataInput stream, int lower, int upper)
+      throws IOException {
+    long n = readVLong(stream);
+    if (n < lower) {
+      if (lower == 0) {
+        throw new IOException("expected non-negative integer, got " + n);
+      } else {
+        throw new IOException("expected integer greater than or equal to " +
+            lower + ", got " + n);
+      }
+    }
+    if (n > upper) {
+      throw new IOException("expected integer less or equal to " + upper +
+          ", got " + n);
+    }
+    return (int)n;
   }
   }
- 
+
   /**
   /**
    * Given the first byte of a vint/vlong, determine the sign
    * Given the first byte of a vint/vlong, determine the sign
    * @param value the first byte
    * @param value the first byte

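A small round-trip showing how the new range check is meant to be used by deserializers (the bounds here are arbitrary):

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.WritableUtils;

    public class VIntRangeSketch {
      public static void main(String[] args) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        WritableUtils.writeVInt(out, 42);

        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(out.getData(), 0, out.getLength()));
        // Anything outside [0, 1000] now fails fast with an IOException instead
        // of being silently truncated by the old (int) cast in readVInt().
        int n = WritableUtils.readVIntInRange(in, 0, 1000);
        System.out.println(n);  // 42
      }
    }
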
+ 23 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java

@@ -36,6 +36,9 @@ public class CompressionCodecFactory {
 
 
   public static final Log LOG =
   public static final Log LOG =
     LogFactory.getLog(CompressionCodecFactory.class.getName());
     LogFactory.getLog(CompressionCodecFactory.class.getName());
+  
+  private static final ServiceLoader<CompressionCodec> CODEC_PROVIDERS =
+    ServiceLoader.load(CompressionCodec.class);
 
 
   /**
   /**
    * A map from the reversed filename suffixes to the codecs.
    * A map from the reversed filename suffixes to the codecs.
@@ -95,16 +98,23 @@ public class CompressionCodecFactory {
   }
   }
 
 
   /**
   /**
-   * Get the list of codecs listed in the configuration
+   * Get the list of codecs discovered via a Java ServiceLoader, or
+   * listed in the configuration. Codecs specified in configuration come
+   * later in the returned list, and are considered to override those
+   * from the ServiceLoader.
    * @param conf the configuration to look in
    * @param conf the configuration to look in
-   * @return a list of the Configuration classes or null if the attribute
-   *         was not set
+   * @return a list of the {@link CompressionCodec} classes
    */
    */
   public static List<Class<? extends CompressionCodec>> getCodecClasses(Configuration conf) {
   public static List<Class<? extends CompressionCodec>> getCodecClasses(Configuration conf) {
+    List<Class<? extends CompressionCodec>> result
+      = new ArrayList<Class<? extends CompressionCodec>>();
+    // Add codec classes discovered via service loading
+    for (CompressionCodec codec : CODEC_PROVIDERS) {
+      result.add(codec.getClass());
+    }
+    // Add codec classes from configuration
     String codecsString = conf.get("io.compression.codecs");
     String codecsString = conf.get("io.compression.codecs");
     if (codecsString != null) {
     if (codecsString != null) {
-      List<Class<? extends CompressionCodec>> result
-        = new ArrayList<Class<? extends CompressionCodec>>();
       StringTokenizer codecSplit = new StringTokenizer(codecsString, ",");
       StringTokenizer codecSplit = new StringTokenizer(codecsString, ",");
       while (codecSplit.hasMoreElements()) {
       while (codecSplit.hasMoreElements()) {
         String codecSubstring = codecSplit.nextToken();
         String codecSubstring = codecSplit.nextToken();
@@ -123,14 +133,14 @@ public class CompressionCodecFactory {
           }
           }
         }
         }
       }
       }
-      return result;
-    } else {
-      return null;
     }
     }
+    return result;
   }
   }
   
   
   /**
   /**
-   * Sets a list of codec classes in the configuration.
+   * Sets a list of codec classes in the configuration. In addition to any
+   * classes specified using this method, {@link CompressionCodec} classes on
+   * the classpath are discovered using a Java ServiceLoader.
    * @param conf the configuration to modify
    * @param conf the configuration to modify
    * @param classes the list of classes to set
    * @param classes the list of classes to set
    */
    */
@@ -151,21 +161,19 @@ public class CompressionCodecFactory {
   
   
   /**
   /**
    * Find the codecs specified in the config value io.compression.codecs 
    * Find the codecs specified in the config value io.compression.codecs 
-   * and register them. Defaults to gzip and zip.
+   * and register them. Defaults to gzip and deflate.
    */
    */
   public CompressionCodecFactory(Configuration conf) {
   public CompressionCodecFactory(Configuration conf) {
     codecs = new TreeMap<String, CompressionCodec>();
     codecs = new TreeMap<String, CompressionCodec>();
     codecsByClassName = new HashMap<String, CompressionCodec>();
     codecsByClassName = new HashMap<String, CompressionCodec>();
     codecsByName = new HashMap<String, CompressionCodec>();
     codecsByName = new HashMap<String, CompressionCodec>();
     List<Class<? extends CompressionCodec>> codecClasses = getCodecClasses(conf);
     List<Class<? extends CompressionCodec>> codecClasses = getCodecClasses(conf);
-    if (codecClasses == null) {
+    if (codecClasses == null || codecClasses.isEmpty()) {
       addCodec(new GzipCodec());
       addCodec(new GzipCodec());
       addCodec(new DefaultCodec());      
       addCodec(new DefaultCodec());      
     } else {
     } else {
-      Iterator<Class<? extends CompressionCodec>> itr = codecClasses.iterator();
-      while (itr.hasNext()) {
-        CompressionCodec codec = ReflectionUtils.newInstance(itr.next(), conf);
-        addCodec(codec);     
+      for (Class<? extends CompressionCodec> codecClass : codecClasses) {
+        addCodec(ReflectionUtils.newInstance(codecClass, conf));
       }
       }
     }
     }
   }
   }

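A short sketch of the new discovery behaviour: codecs found through META-INF/services/org.apache.hadoop.io.compress.CompressionCodec come first, and anything named in io.compression.codecs is appended afterwards so it wins on conflicts:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.CompressionCodecFactory;

    public class CodecDiscoverySketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Listing codecs is now optional; this entry merely overrides/extends
        // whatever the ServiceLoader already found on the classpath.
        conf.set("io.compression.codecs",
            "org.apache.hadoop.io.compress.GzipCodec");

        List<Class<? extends CompressionCodec>> classes =
            CompressionCodecFactory.getCodecClasses(conf);
        System.out.println(classes);

        CompressionCodecFactory factory = new CompressionCodecFactory(conf);
        System.out.println(factory.getCodec(new Path("part-00000.gz")));
      }
    }
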
+ 36 - 33
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -50,8 +50,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.ipc.RpcPayloadHeader.*;
 import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadHeaderProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadOperationProto;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.WritableUtils;
@@ -163,10 +164,10 @@ public class Client {
     final Writable rpcRequest;  // the serialized rpc request - RpcPayload
     final Writable rpcRequest;  // the serialized rpc request - RpcPayload
     Writable rpcResponse;       // null if rpc has error
     Writable rpcResponse;       // null if rpc has error
     IOException error;          // exception, null if success
     IOException error;          // exception, null if success
-    final RpcKind rpcKind;      // Rpc EngineKind
+    final RPC.RpcKind rpcKind;      // Rpc EngineKind
     boolean done;               // true when call is done
     boolean done;               // true when call is done
 
 
-    protected Call(RpcKind rpcKind, Writable param) {
+    protected Call(RPC.RpcKind rpcKind, Writable param) {
       this.rpcKind = rpcKind;
       this.rpcKind = rpcKind;
       this.rpcRequest = param;
       this.rpcRequest = param;
       synchronized (Client.this) {
       synchronized (Client.this) {
@@ -613,7 +614,7 @@ public class Client {
             this.in = new DataInputStream(new BufferedInputStream(inStream));
             this.in = new DataInputStream(new BufferedInputStream(inStream));
           }
           }
           this.out = new DataOutputStream(new BufferedOutputStream(outStream));
           this.out = new DataOutputStream(new BufferedOutputStream(outStream));
-          writeHeader();
+          writeConnectionContext();
 
 
           // update last activity time
           // update last activity time
           touch();
           touch();
@@ -704,16 +705,17 @@ public class Client {
       out.flush();
       out.flush();
     }
     }
     
     
-    /* Write the protocol header for each connection
+    /* Write the connection context header for each connection
      * Out is not synchronized because only the first thread does this.
      * Out is not synchronized because only the first thread does this.
      */
      */
-    private void writeHeader() throws IOException {
+    private void writeConnectionContext() throws IOException {
       // Write out the ConnectionHeader
       // Write out the ConnectionHeader
       DataOutputBuffer buf = new DataOutputBuffer();
       DataOutputBuffer buf = new DataOutputBuffer();
       connectionContext.writeTo(buf);
       connectionContext.writeTo(buf);
       
       
       // Write out the payload length
       // Write out the payload length
       int bufLen = buf.getLength();
       int bufLen = buf.getLength();
+
       out.writeInt(bufLen);
       out.writeInt(bufLen);
       out.write(buf.getData(), 0, bufLen);
       out.write(buf.getData(), 0, bufLen);
     }
     }
@@ -806,21 +808,22 @@ public class Client {
           if (LOG.isDebugEnabled())
           if (LOG.isDebugEnabled())
             LOG.debug(getName() + " sending #" + call.id);
             LOG.debug(getName() + " sending #" + call.id);
           
           
-          //for serializing the
-          //data to be written
+          // Serializing the data to be written.
+          // Format:
+          // 0) Length of rest below (1 + 2)
+          // 1) PayloadHeader  - is serialized Delimited hence contains length
+          // 2) the Payload - the RpcRequest
+          //
           d = new DataOutputBuffer();
           d = new DataOutputBuffer();
-          d.writeInt(0); // placeholder for data length
-          RpcPayloadHeader header = new RpcPayloadHeader(
-              call.rpcKind, RpcPayloadOperation.RPC_FINAL_PAYLOAD, call.id);
-          header.write(d);
+          RpcPayloadHeaderProto header = ProtoUtil.makeRpcPayloadHeader(
+             call.rpcKind, RpcPayloadOperationProto.RPC_FINAL_PAYLOAD, call.id);
+          header.writeDelimitedTo(d);
           call.rpcRequest.write(d);
           call.rpcRequest.write(d);
           byte[] data = d.getData();
           byte[] data = d.getData();
-          int dataLength = d.getLength() - 4;
-          data[0] = (byte)((dataLength >>> 24) & 0xff);
-          data[1] = (byte)((dataLength >>> 16) & 0xff);
-          data[2] = (byte)((dataLength >>> 8) & 0xff);
-          data[3] = (byte)(dataLength & 0xff);
-          out.write(data, 0, dataLength + 4);//write the data
+   
+          int totalLength = d.getLength();
+          out.writeInt(totalLength); // Total Length
+          out.write(data, 0, totalLength);//PayloadHeader + RpcRequest
           out.flush();
           out.flush();
         }
         }
       } catch(IOException e) {
       } catch(IOException e) {
@@ -937,7 +940,7 @@ public class Client {
     private int index;
     private int index;
     
     
     public ParallelCall(Writable param, ParallelResults results, int index) {
     public ParallelCall(Writable param, ParallelResults results, int index) {
-      super(RpcKind.RPC_WRITABLE, param);
+      super(RPC.RpcKind.RPC_WRITABLE, param);
       this.results = results;
       this.results = results;
       this.index = index;
       this.index = index;
     }
     }
@@ -1022,22 +1025,22 @@ public class Client {
   }
   }
 
 
   /**
   /**
-   * Same as {@link #call(RpcPayloadHeader.RpcKind, Writable, ConnectionId)}
+   * Same as {@link #call(RPC.RpcKind, Writable, ConnectionId)}
    *  for RPC_BUILTIN
    *  for RPC_BUILTIN
    */
    */
   public Writable call(Writable param, InetSocketAddress address)
   public Writable call(Writable param, InetSocketAddress address)
   throws InterruptedException, IOException {
   throws InterruptedException, IOException {
-    return call(RpcKind.RPC_BUILTIN, param, address);
+    return call(RPC.RpcKind.RPC_BUILTIN, param, address);
     
     
   }
   }
   /** Make a call, passing <code>param</code>, to the IPC server running at
   /** Make a call, passing <code>param</code>, to the IPC server running at
    * <code>address</code>, returning the value.  Throws exceptions if there are
    * <code>address</code>, returning the value.  Throws exceptions if there are
    * network problems or if the remote code threw an exception.
    * network problems or if the remote code threw an exception.
-   * @deprecated Use {@link #call(RpcPayloadHeader.RpcKind, Writable,
+   * @deprecated Use {@link #call(RPC.RpcKind, Writable,
    *  ConnectionId)} instead 
    *  ConnectionId)} instead 
    */
    */
   @Deprecated
   @Deprecated
-  public Writable call(RpcKind rpcKind, Writable param, InetSocketAddress address)
+  public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress address)
   throws InterruptedException, IOException {
   throws InterruptedException, IOException {
       return call(rpcKind, param, address, null);
       return call(rpcKind, param, address, null);
   }
   }
@@ -1047,11 +1050,11 @@ public class Client {
    * the value.  
    * the value.  
    * Throws exceptions if there are network problems or if the remote code 
    * Throws exceptions if there are network problems or if the remote code 
    * threw an exception.
    * threw an exception.
-   * @deprecated Use {@link #call(RpcPayloadHeader.RpcKind, Writable, 
+   * @deprecated Use {@link #call(RPC.RpcKind, Writable, 
    * ConnectionId)} instead 
    * ConnectionId)} instead 
    */
    */
   @Deprecated
   @Deprecated
-  public Writable call(RpcKind rpcKind, Writable param, InetSocketAddress addr, 
+  public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress addr, 
       UserGroupInformation ticket)  
       UserGroupInformation ticket)  
       throws InterruptedException, IOException {
       throws InterruptedException, IOException {
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, null, ticket, 0,
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, null, ticket, 0,
@@ -1065,11 +1068,11 @@ public class Client {
    * timeout, returning the value.  
    * timeout, returning the value.  
    * Throws exceptions if there are network problems or if the remote code 
    * Throws exceptions if there are network problems or if the remote code 
    * threw an exception. 
    * threw an exception. 
-   * @deprecated Use {@link #call(RpcPayloadHeader.RpcKind, Writable,
+   * @deprecated Use {@link #call(RPC.RpcKind, Writable,
    *  ConnectionId)} instead 
    *  ConnectionId)} instead 
    */
    */
   @Deprecated
   @Deprecated
-  public Writable call(RpcKind rpcKind, Writable param, InetSocketAddress addr, 
+  public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress addr, 
                        Class<?> protocol, UserGroupInformation ticket,
                        Class<?> protocol, UserGroupInformation ticket,
                        int rpcTimeout)  
                        int rpcTimeout)  
                        throws InterruptedException, IOException {
                        throws InterruptedException, IOException {
@@ -1080,7 +1083,7 @@ public class Client {
 
 
   
   
   /**
   /**
-   * Same as {@link #call(RpcPayloadHeader.RpcKind, Writable, InetSocketAddress, 
+   * Same as {@link #call(RPC.RpcKind, Writable, InetSocketAddress, 
    * Class, UserGroupInformation, int, Configuration)}
    * Class, UserGroupInformation, int, Configuration)}
    * except that rpcKind is writable.
    * except that rpcKind is writable.
    */
    */
@@ -1090,7 +1093,7 @@ public class Client {
       throws InterruptedException, IOException {
       throws InterruptedException, IOException {
         ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
         ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
         ticket, rpcTimeout, conf);
         ticket, rpcTimeout, conf);
-    return call(RpcKind.RPC_BUILTIN, param, remoteId);
+    return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId);
   }
   }
   
   
   /**
   /**
@@ -1101,7 +1104,7 @@ public class Client {
    * value. Throws exceptions if there are network problems or if the remote
    * value. Throws exceptions if there are network problems or if the remote
    * code threw an exception.
    * code threw an exception.
    */
    */
-  public Writable call(RpcKind rpcKind, Writable param, InetSocketAddress addr, 
+  public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress addr, 
                        Class<?> protocol, UserGroupInformation ticket,
                        Class<?> protocol, UserGroupInformation ticket,
                        int rpcTimeout, Configuration conf)  
                        int rpcTimeout, Configuration conf)  
                        throws InterruptedException, IOException {
                        throws InterruptedException, IOException {
@@ -1111,12 +1114,12 @@ public class Client {
   }
   }
   
   
   /**
   /**
-   * Same as {link {@link #call(RpcPayloadHeader.RpcKind, Writable, ConnectionId)}
+   * Same as {link {@link #call(RPC.RpcKind, Writable, ConnectionId)}
    * except the rpcKind is RPC_BUILTIN
    * except the rpcKind is RPC_BUILTIN
    */
    */
   public Writable call(Writable param, ConnectionId remoteId)  
   public Writable call(Writable param, ConnectionId remoteId)  
       throws InterruptedException, IOException {
       throws InterruptedException, IOException {
-     return call(RpcKind.RPC_BUILTIN, param, remoteId);
+     return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId);
   }
   }
   
   
   /** 
   /** 
@@ -1130,7 +1133,7 @@ public class Client {
    * Throws exceptions if there are network problems or if the remote code 
    * Throws exceptions if there are network problems or if the remote code 
    * threw an exception.
    * threw an exception.
    */
    */
-  public Writable call(RpcKind rpcKind, Writable rpcRequest,
+  public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest,
       ConnectionId remoteId) throws InterruptedException, IOException {
       ConnectionId remoteId) throws InterruptedException, IOException {
     Call call = new Call(rpcKind, rpcRequest);
     Call call = new Call(rpcKind, rpcRequest);
     Connection connection = getConnection(remoteId, call);
     Connection connection = getConnection(remoteId, call);

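A hedged sketch of the request framing that sendParam() now writes: a 4-byte total length, then the length-delimited RpcPayloadHeaderProto, then the serialized request. It assumes ProtoUtil is the org.apache.hadoop.util helper referenced in the hunk above and uses a Text value as a stand-in for a real request:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadHeaderProto;
    import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadOperationProto;
    import org.apache.hadoop.util.ProtoUtil;

    public class FramingSketch {
      public static void main(String[] args) throws Exception {
        DataOutputBuffer d = new DataOutputBuffer();
        RpcPayloadHeaderProto header = ProtoUtil.makeRpcPayloadHeader(
            RPC.RpcKind.RPC_BUILTIN, RpcPayloadOperationProto.RPC_FINAL_PAYLOAD, 1);
        header.writeDelimitedTo(d);          // header carries its own length
        new Text("ping").write(d);           // stand-in for the real rpcRequest

        DataOutputStream out =
            new DataOutputStream(new ByteArrayOutputStream());
        out.writeInt(d.getLength());         // total length, no placeholder patching
        out.write(d.getData(), 0, d.getLength());
        out.flush();
      }
    }
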
+ 5 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -38,7 +38,6 @@ import org.apache.hadoop.io.DataOutputOutputStream;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 
 
 import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto;
 import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -61,7 +60,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   
   
   static { // Register the rpcRequest deserializer for WritableRpcEngine 
   static { // Register the rpcRequest deserializer for WritableRpcEngine 
     org.apache.hadoop.ipc.Server.registerProtocolEngine(
     org.apache.hadoop.ipc.Server.registerProtocolEngine(
-        RpcKind.RPC_PROTOCOL_BUFFER, RpcRequestWritable.class,
+        RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcRequestWritable.class,
         new Server.ProtoBufRpcInvoker());
         new Server.ProtoBufRpcInvoker());
   }
   }
 
 
@@ -182,7 +181,7 @@ public class ProtobufRpcEngine implements RpcEngine {
       HadoopRpcRequestProto rpcRequest = constructRpcRequest(method, args);
       HadoopRpcRequestProto rpcRequest = constructRpcRequest(method, args);
       RpcResponseWritable val = null;
       RpcResponseWritable val = null;
       try {
       try {
-        val = (RpcResponseWritable) client.call(RpcKind.RPC_PROTOCOL_BUFFER,
+        val = (RpcResponseWritable) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
             new RpcRequestWritable(rpcRequest), remoteId);
             new RpcRequestWritable(rpcRequest), remoteId);
       } catch (Throwable e) {
       } catch (Throwable e) {
         throw new ServiceException(e);
         throw new ServiceException(e);
@@ -351,7 +350,7 @@ public class ProtobufRpcEngine implements RpcEngine {
           numReaders, queueSizePerHandler, conf, classNameBase(protocolImpl
           numReaders, queueSizePerHandler, conf, classNameBase(protocolImpl
               .getClass().getName()), secretManager, portRangeConfig);
               .getClass().getName()), secretManager, portRangeConfig);
       this.verbose = verbose;  
       this.verbose = verbose;  
-      registerProtocolAndImpl(RpcKind.RPC_PROTOCOL_BUFFER, protocolClass,
+      registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass,
           protocolImpl);
           protocolImpl);
     }
     }
     
     
@@ -363,10 +362,10 @@ public class ProtobufRpcEngine implements RpcEngine {
           String protoName, long version) throws IOException {
           String protoName, long version) throws IOException {
         ProtoNameVer pv = new ProtoNameVer(protoName, version);
         ProtoNameVer pv = new ProtoNameVer(protoName, version);
         ProtoClassProtoImpl impl = 
         ProtoClassProtoImpl impl = 
-            server.getProtocolImplMap(RpcKind.RPC_PROTOCOL_BUFFER).get(pv);
+            server.getProtocolImplMap(RPC.RpcKind.RPC_PROTOCOL_BUFFER).get(pv);
         if (impl == null) { // no match for Protocol AND Version
         if (impl == null) { // no match for Protocol AND Version
           VerProtocolImpl highest = 
           VerProtocolImpl highest = 
-              server.getHighestSupportedProtocol(RpcKind.RPC_PROTOCOL_BUFFER, 
+              server.getHighestSupportedProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, 
                   protoName);
                   protoName);
           if (highest == null) {
           if (highest == null) {
             throw new IOException("Unknown protocol: " + protoName);
             throw new IOException("Unknown protocol: " + protoName);

+ 3 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInfoServerSideTranslatorPB.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.ipc;
 package org.apache.hadoop.ipc;
 
 
 import org.apache.hadoop.ipc.RPC.Server.VerProtocolImpl;
 import org.apache.hadoop.ipc.RPC.Server.VerProtocolImpl;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRequestProto;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRequestProto;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureResponseProto;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureResponseProto;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolVersionsRequestProto;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolVersionsRequestProto;
@@ -49,7 +48,7 @@ public class ProtocolMetaInfoServerSideTranslatorPB implements
     String protocol = request.getProtocol();
     String protocol = request.getProtocol();
     GetProtocolVersionsResponseProto.Builder builder = 
     GetProtocolVersionsResponseProto.Builder builder = 
         GetProtocolVersionsResponseProto.newBuilder();
         GetProtocolVersionsResponseProto.newBuilder();
-    for (RpcKind r : RpcKind.values()) {
+    for (RPC.RpcKind r : RPC.RpcKind.values()) {
       long[] versions;
       long[] versions;
       try {
       try {
         versions = getProtocolVersionForRpcKind(r, protocol);
         versions = getProtocolVersionForRpcKind(r, protocol);
@@ -78,7 +77,7 @@ public class ProtocolMetaInfoServerSideTranslatorPB implements
     String rpcKind = request.getRpcKind();
     String rpcKind = request.getRpcKind();
     long[] versions;
     long[] versions;
     try {
     try {
-      versions = getProtocolVersionForRpcKind(RpcKind.valueOf(rpcKind),
+      versions = getProtocolVersionForRpcKind(RPC.RpcKind.valueOf(rpcKind),
           protocol);
           protocol);
     } catch (ClassNotFoundException e1) {
     } catch (ClassNotFoundException e1) {
       throw new ServiceException(e1);
       throw new ServiceException(e1);
@@ -104,7 +103,7 @@ public class ProtocolMetaInfoServerSideTranslatorPB implements
     return builder.build();
     return builder.build();
   }
   }
   
   
-  private long[] getProtocolVersionForRpcKind(RpcKind rpcKind,
+  private long[] getProtocolVersionForRpcKind(RPC.RpcKind rpcKind,
       String protocol) throws ClassNotFoundException {
       String protocol) throws ClassNotFoundException {
     Class<?> protocolClass = Class.forName(protocol);
     Class<?> protocolClass = Class.forName(protocol);
     String protocolName = RPC.getProtocolName(protocolClass);
     String protocolName = RPC.getProtocolName(protocolClass);

+ 15 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java

@@ -42,7 +42,6 @@ import org.apache.commons.logging.*;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.Client.ConnectionId;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hadoop.security.SaslRpcServer;
@@ -73,6 +72,18 @@ import com.google.protobuf.BlockingService;
  * the protocol instance is transmitted.
  * the protocol instance is transmitted.
  */
  */
 public class RPC {
 public class RPC {
+  public enum RpcKind {
+    RPC_BUILTIN ((short) 1),         // Used for built in calls by tests
+    RPC_WRITABLE ((short) 2),        // Use WritableRpcEngine 
+    RPC_PROTOCOL_BUFFER ((short) 3); // Use ProtobufRpcEngine
+    final static short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size
+    private static final short FIRST_INDEX = RPC_BUILTIN.value;    
+    public final short value; //TODO make it private
+
+    RpcKind(short val) {
+      this.value = val;
+    } 
+  }
   
   
   interface RpcInvoker {   
   interface RpcInvoker {   
     /**
     /**
@@ -777,7 +788,7 @@ public class RPC {
    ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>> protocolImplMapArray = 
    ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>> protocolImplMapArray = 
        new ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>>(RpcKind.MAX_INDEX);
        new ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>>(RpcKind.MAX_INDEX);
    
    
-   Map<ProtoNameVer, ProtoClassProtoImpl> getProtocolImplMap(RpcKind rpcKind) {
+   Map<ProtoNameVer, ProtoClassProtoImpl> getProtocolImplMap(RPC.RpcKind rpcKind) {
      if (protocolImplMapArray.size() == 0) {// initialize for all rpc kinds
      if (protocolImplMapArray.size() == 0) {// initialize for all rpc kinds
        for (int i=0; i <= RpcKind.MAX_INDEX; ++i) {
        for (int i=0; i <= RpcKind.MAX_INDEX; ++i) {
          protocolImplMapArray.add(
          protocolImplMapArray.add(
@@ -821,7 +832,7 @@ public class RPC {
    
    
    
    
   @SuppressWarnings("unused") // will be useful later.
-   VerProtocolImpl[] getSupportedProtocolVersions(RpcKind rpcKind,
+   VerProtocolImpl[] getSupportedProtocolVersions(RPC.RpcKind rpcKind,
       String protocolName) {
     VerProtocolImpl[] resultk = 
         new  VerProtocolImpl[getProtocolImplMap(rpcKind).size()];
@@ -900,7 +911,7 @@ public class RPC {
     }
     }
     
     
     @Override
     @Override
-    public Writable call(RpcKind rpcKind, String protocol,
+    public Writable call(RPC.RpcKind rpcKind, String protocol,
         Writable rpcRequest, long receiveTime) throws Exception {
         Writable rpcRequest, long receiveTime) throws Exception {
       return getRpcInvoker(rpcKind).call(this, protocol, rpcRequest,
       return getRpcInvoker(rpcKind).call(this, protocol, rpcRequest,
           receiveTime);
           receiveTime);

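With this change RpcKind lives directly on RPC and carries a short wire value per kind. A minimal sketch (not part of the patch) of how callers can reference the relocated enum and its public value field:

    for (RPC.RpcKind kind : RPC.RpcKind.values()) {
      // RPC_BUILTIN=1, RPC_WRITABLE=2, RPC_PROTOCOL_BUFFER=3, per the enum above
      System.out.println(kind + " -> wire value " + kind.value);
    }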
+ 1 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java

@@ -27,7 +27,6 @@ import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRequestProto;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureResponseProto;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto;
@@ -107,7 +106,7 @@ public class RpcClientUtil {
   * @throws IOException
   */
  public static boolean isMethodSupported(Object rpcProxy, Class<?> protocol,
-      RpcKind rpcKind, long version, String methodName) throws IOException {
+      RPC.RpcKind rpcKind, long version, String methodName) throws IOException {
    InetSocketAddress serverAddress = RPC.getServerAddress(rpcProxy);
    Map<Long, ProtocolSignature> versionMap = getVersionSignatureMap(
        serverAddress, protocol.getName(), rpcKind.toString());

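A hypothetical client-side usage sketch of the updated isMethodSupported signature (the proxy object, protocol interface, and method name below are illustrative placeholders, not from this patch):

    // proxy obtained earlier, e.g. via RPC.getProxy(...)
    boolean supported = RpcClientUtil.isMethodSupported(
        proxy,
        MyProtocolPB.class,               // hypothetical protocol interface
        RPC.RpcKind.RPC_PROTOCOL_BUFFER,  // ask about the protobuf engine registration
        1L,                               // protocol version
        "doSomething");                   // hypothetical method name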
+ 0 - 118
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcPayloadHeader.java

@@ -1,118 +0,0 @@
-package org.apache.hadoop.ipc;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-
-/**
- * This is the rpc payload header. It is sent with every rpc call
- * <pre>
- * The format of RPC call is as follows:
- * +---------------------------------------------------+
- * |  Rpc length in bytes (header + payload length)    |
- * +---------------------------------------------------+
- * |      Rpc Header       |       Rpc Payload         |
- * +---------------------------------------------------+
- * 
- * The format of Rpc Header is:
- * +----------------------------------+
- * |  RpcKind (1 bytes)               |      
- * +----------------------------------+
- * |  RpcPayloadOperation (1 bytes)   |      
- * +----------------------------------+
- * |  Call ID (4 bytes)               |      
- * +----------------------------------+
- * 
- * {@link RpcKind} determines the type of serialization used for Rpc Payload.
- * </pre>
- * <p>
- * <b>Note this header does NOT have its own version number, 
- * it used the version number from the connection header. </b>
- */
-public class RpcPayloadHeader implements Writable {
-  public enum RpcPayloadOperation {
-    RPC_FINAL_PAYLOAD ((short)1),
-    RPC_CONTINUATION_PAYLOAD ((short)2), // not implemented yet
-    RPC_CLOSE_CONNECTION ((short)3);     // close the rpc connection
-    
-    private final short code;
-    private static final short FIRST_INDEX = RPC_FINAL_PAYLOAD.code;
-    RpcPayloadOperation(short val) {
-      this.code = val;
-    }
-    
-    public void write(DataOutput out) throws IOException {  
-      out.writeByte(code);
-    }
-    
-    static RpcPayloadOperation readFields(DataInput in) throws IOException {
-      short inValue = in.readByte();
-      return RpcPayloadOperation.values()[inValue - FIRST_INDEX];
-    }
-  }
-  
-  public enum RpcKind {
-    RPC_BUILTIN ((short) 1),         // Used for built in calls by tests
-    RPC_WRITABLE ((short) 2),        // Use WritableRpcEngine 
-    RPC_PROTOCOL_BUFFER ((short) 3); // Use ProtobufRpcEngine
-    final static short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size
-    private static final short FIRST_INDEX = RPC_BUILTIN.value;    
-    private final short value;
-
-    RpcKind(short val) {
-      this.value = val;
-    }
-    
-    public void write(DataOutput out) throws IOException {
-      out.writeByte(value);
-    }
-    
-    static RpcKind readFields(DataInput in) throws IOException {
-      short inValue = in.readByte();
-      return RpcKind.values()[inValue - FIRST_INDEX];
-    }  
-  }
-  
-  private RpcKind kind;
-  private RpcPayloadOperation operation;
-  private int callId;
-  
-  public RpcPayloadHeader() {
-    kind = RpcKind.RPC_WRITABLE;
-    operation = RpcPayloadOperation.RPC_CLOSE_CONNECTION;
-  }
-  
-  public RpcPayloadHeader(RpcKind kind, RpcPayloadOperation op, int callId) {
-    this.kind  = kind;
-    this.operation = op;
-    this.callId = callId;
-  }
-  
-  int getCallId() {
-    return callId;
-  }
-  
-  RpcKind getkind() {
-    return kind;
-  }
-  
-  RpcPayloadOperation getOperation() {
-    return operation;
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    kind.write(out);
-    operation.write(out);
-    out.writeInt(callId); 
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    kind = RpcKind.readFields(in);
-    operation = RpcPayloadOperation.readFields(in);
-    this.callId = in.readInt();
-  }
-}

+ 29 - 25
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -72,11 +72,10 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
 import org.apache.hadoop.ipc.RPC.VersionMismatch;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcPayloadOperation;
 import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
 import org.apache.hadoop.ipc.metrics.RpcMetrics;
 import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.*;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SaslRpcServer;
@@ -170,8 +169,8 @@ public abstract class Server {
      this.rpcRequestWrapperClass = rpcRequestWrapperClass;
    }   
  }
-  static Map<RpcKind, RpcKindMapValue> rpcKindMap = new
-      HashMap<RpcKind, RpcKindMapValue>(4);
+  static Map<RPC.RpcKind, RpcKindMapValue> rpcKindMap = new
+      HashMap<RPC.RpcKind, RpcKindMapValue>(4);
  
  

@@ -185,7 +184,7 @@ public abstract class Server {
   *  @param rpcInvoker - use to process the calls on SS.
   */
  
-  public static void registerProtocolEngine(RpcKind rpcKind, 
+  public static void registerProtocolEngine(RPC.RpcKind rpcKind, 
          Class<? extends Writable> rpcRequestWrapperClass,
          RpcInvoker rpcInvoker) {
    RpcKindMapValue  old = 
@@ -201,14 +200,14 @@ public abstract class Server {
  }
  
  public Class<? extends Writable> getRpcRequestWrapper(
-      RpcKind rpcKind) {
+      RpcKindProto rpcKind) {
    if (rpcRequestClass != null)
       return rpcRequestClass;
-    RpcKindMapValue val = rpcKindMap.get(rpcKind);
+    RpcKindMapValue val = rpcKindMap.get(ProtoUtil.convert(rpcKind));
    return (val == null) ? null : val.rpcRequestWrapperClass; 
  }
  
-  public static RpcInvoker  getRpcInvoker(RpcKind rpcKind) {
+  public static RpcInvoker  getRpcInvoker(RPC.RpcKind rpcKind) {
    RpcKindMapValue val = rpcKindMap.get(rpcKind);
    return (val == null) ? null : val.rpcInvoker; 
  }
@@ -403,12 +402,12 @@ public abstract class Server {
    private long timestamp;               // time received when response is null
                                          // time served when response is not null
    private ByteBuffer rpcResponse;       // the response for this call
-    private final RpcKind rpcKind;
+    private final RPC.RpcKind rpcKind;

    public Call(int id, Writable param, Connection connection) {
-      this( id,  param,  connection, RpcKind.RPC_BUILTIN );    
+      this( id,  param,  connection, RPC.RpcKind.RPC_BUILTIN );    
    }
-    public Call(int id, Writable param, Connection connection, RpcKind kind) { 
+    public Call(int id, Writable param, Connection connection, RPC.RpcKind kind) { 
      this.callId = id;
      this.rpcRequest = param;
      this.connection = connection;
@@ -1366,7 +1365,6 @@ public abstract class Server {
        if (data == null) {
          dataLengthBuffer.flip();
          dataLength = dataLengthBuffer.getInt();
-       
          if ((dataLength == Client.PING_CALL_ID) && (!useWrap)) {
            // covers the !useSasl too
            dataLengthBuffer.clear();
@@ -1555,22 +1553,27 @@ public abstract class Server {
    private void processData(byte[] buf) throws  IOException, InterruptedException {
      DataInputStream dis =
        new DataInputStream(new ByteArrayInputStream(buf));
-      RpcPayloadHeader header = new RpcPayloadHeader();
-      header.readFields(dis);           // Read the RpcPayload header
+      RpcPayloadHeaderProto header = RpcPayloadHeaderProto.parseDelimitedFrom(dis);
        
      if (LOG.isDebugEnabled())
        LOG.debug(" got #" + header.getCallId());
-      if (header.getOperation() != RpcPayloadOperation.RPC_FINAL_PAYLOAD) {
+      if (!header.hasRpcOp()) {
+        throw new IOException(" IPC Server: No rpc op in rpcPayloadHeader");
+      }
+      if (header.getRpcOp() != RpcPayloadOperationProto.RPC_FINAL_PAYLOAD) {
        throw new IOException("IPC Server does not implement operation" + 
-              header.getOperation());
+              header.getRpcOp());
      }
      // If we know the rpc kind, get its class so that we can deserialize
      // (Note it would make more sense to have the handler deserialize but 
      // we continue with this original design.
+      if (!header.hasRpcKind()) {
+        throw new IOException(" IPC Server: No rpc kind in rpcPayloadHeader");
+      }
      Class<? extends Writable> rpcRequestClass = 
-          getRpcRequestWrapper(header.getkind());
+          getRpcRequestWrapper(header.getRpcKind());
      if (rpcRequestClass == null) {
-        LOG.warn("Unknown rpc kind "  + header.getkind() + 
+        LOG.warn("Unknown rpc kind "  + header.getRpcKind() + 
            " from client " + getHostAddress());
        final Call readParamsFailedCall = 
            new Call(header.getCallId(), null, this);
@@ -1578,7 +1581,7 @@ public abstract class Server {
 
 
        setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
            IOException.class.getName(),
-            "Unknown rpc kind "  + header.getkind());
+            "Unknown rpc kind "  + header.getRpcKind());
        responder.doRespond(readParamsFailedCall);
        return;   
      }
@@ -1589,7 +1592,7 @@ public abstract class Server {
      } catch (Throwable t) {
        LOG.warn("Unable to read call parameters for client " +
                 getHostAddress() + "on connection protocol " +
-            this.protocolName + " for rpcKind " + header.getkind(),  t);
+            this.protocolName + " for rpcKind " + header.getRpcKind(),  t);
        final Call readParamsFailedCall = 
            new Call(header.getCallId(), null, this);
        ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
@@ -1601,7 +1604,8 @@ public abstract class Server {
        return;
      }
        
-      Call call = new Call(header.getCallId(), rpcRequest, this, header.getkind());
+      Call call = new Call(header.getCallId(), rpcRequest, this, 
+          ProtoUtil.convert(header.getRpcKind()));
      callQueue.put(call);              // queue the call; maybe blocked here
      incRpcCount();  // Increment the rpc count
    }
@@ -1772,7 +1776,7 @@ public abstract class Server {
   * from configuration. Otherwise the configuration will be picked up.
   * 
   * If rpcRequestClass is null then the rpcRequestClass must have been 
-   * registered via {@link #registerProtocolEngine(RpcPayloadHeader.RpcKind,
+   * registered via {@link #registerProtocolEngine(RPC.RpcKind,
   *  Class, RPC.RpcInvoker)}
   * This parameter has been retained for compatibility with existing tests
   * and usage.
@@ -1986,16 +1990,16 @@ public abstract class Server {
   
   
  /** 
   * Called for each call. 
-   * @deprecated Use  {@link #call(RpcPayloadHeader.RpcKind, String,
+   * @deprecated Use  {@link #call(RPC.RpcKind, String,
   *  Writable, long)} instead
   */
  @Deprecated
  public Writable call(Writable param, long receiveTime) throws Exception {
-    return call(RpcKind.RPC_BUILTIN, null, param, receiveTime);
+    return call(RPC.RpcKind.RPC_BUILTIN, null, param, receiveTime);
  }
  
  /** Called for each call. */
-  public abstract Writable call(RpcKind rpcKind, String protocol,
+  public abstract Writable call(RPC.RpcKind rpcKind, String protocol,
      Writable param, long receiveTime) throws Exception;
  
  /**

+ 7 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java

@@ -33,7 +33,6 @@ import org.apache.commons.logging.*;
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
@@ -75,7 +74,7 @@ public class WritableRpcEngine implements RpcEngine {
   * Register the rpcRequest deserializer for WritableRpcEngine
   */
  private static synchronized void initialize() {
-    org.apache.hadoop.ipc.Server.registerProtocolEngine(RpcKind.RPC_WRITABLE,
+    org.apache.hadoop.ipc.Server.registerProtocolEngine(RPC.RpcKind.RPC_WRITABLE,
        Invocation.class, new Server.WritableRpcInvoker());
    isInitialized = true;
  }
@@ -223,7 +222,7 @@ public class WritableRpcEngine implements RpcEngine {
      }

      ObjectWritable value = (ObjectWritable)
-        client.call(RpcKind.RPC_WRITABLE, new Invocation(method, args), remoteId);
+        client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args), remoteId);
      if (LOG.isDebugEnabled()) {
        long callTime = System.currentTimeMillis() - startTime;
        LOG.debug("Call: " + method.getName() + " " + callTime);
@@ -412,12 +411,12 @@ public class WritableRpcEngine implements RpcEngine {
              protocolImpl.getClass());
        }
        // register protocol class and its super interfaces
-        registerProtocolAndImpl(RpcKind.RPC_WRITABLE, protocolClass, protocolImpl);
+        registerProtocolAndImpl(RPC.RpcKind.RPC_WRITABLE, protocolClass, protocolImpl);
        protocols = RPC.getProtocolInterfaces(protocolClass);
      }
      for (Class<?> p : protocols) {
        if (!p.equals(VersionedProtocol.class)) {
-          registerProtocolAndImpl(RpcKind.RPC_WRITABLE, p, protocolImpl);
+          registerProtocolAndImpl(RPC.RpcKind.RPC_WRITABLE, p, protocolImpl);
        }
      }

@@ -461,7 +460,7 @@ public class WritableRpcEngine implements RpcEngine {
            // registered directly.
            // Send the call to the highest  protocol version
            VerProtocolImpl highest = server.getHighestSupportedProtocol(
-                RpcKind.RPC_WRITABLE, protocolName);
+                RPC.RpcKind.RPC_WRITABLE, protocolName);
            if (highest == null) {
              throw new IOException("Unknown protocol: " + protocolName);
            }
@@ -473,10 +472,10 @@ public class WritableRpcEngine implements RpcEngine {
            ProtoNameVer pv = 
                new ProtoNameVer(call.declaringClassProtocolName, clientVersion);
            protocolImpl = 
-                server.getProtocolImplMap(RpcKind.RPC_WRITABLE).get(pv);
+                server.getProtocolImplMap(RPC.RpcKind.RPC_WRITABLE).get(pv);
            if (protocolImpl == null) { // no match for Protocol AND Version
               VerProtocolImpl highest = 
-                   server.getHighestSupportedProtocol(RpcKind.RPC_WRITABLE, 
+                   server.getHighestSupportedProtocol(RPC.RpcKind.RPC_WRITABLE, 
                       protoName);
              if (highest == null) {
                throw new IOException("Unknown protocol: " + protoName);

+ 3 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java

@@ -36,11 +36,9 @@ import org.apache.hadoop.util.ServletUtil;
  */
 @InterfaceStability.Evolving
 public class LogLevel {
-  public static final String USAGES = "\nUSAGES:\n"
-    + "java " + LogLevel.class.getName()
-    + " -getlevel <host:port> <name>\n"
-    + "java " + LogLevel.class.getName()
-    + " -setlevel <host:port> <name> <level>\n";
+  public static final String USAGES = "\nUsage: General options are:\n"
+      + "\t[-getlevel <host:httpPort> <name>]\n"
+      + "\t[-setlevel <host:httpPort> <name> <level>]\n";
 
 
   /**
    * A command line implementation

+ 13 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java

@@ -351,8 +351,19 @@ public class NetUtils {
   * @return socket address that a client can use to connect to the server.
   */
  public static InetSocketAddress getConnectAddress(Server server) {
-    InetSocketAddress addr = server.getListenerAddress();
-    if (addr.getAddress().isAnyLocalAddress()) {
+    return getConnectAddress(server.getListenerAddress());
+  }
+  
+  /**
+   * Returns the InetSocketAddress that a client can use to connect to the
+   * given listening address.  This returns "hostname:port" of the server,
+   * or "127.0.0.1:port" when given a wildcard address of "0.0.0.0:port".
+   * 
+   * @param addr of a listener
+   * @return socket address that a client can use to connect to the server.
+   */
+  public static InetSocketAddress getConnectAddress(InetSocketAddress addr) {
+    if (!addr.isUnresolved() && addr.getAddress().isAnyLocalAddress()) {
      try {
        addr = new InetSocketAddress(InetAddress.getLocalHost(), addr.getPort());
      } catch (UnknownHostException uhe) {

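The new overload makes the wildcard-to-connectable translation available for any listener address, not just a live Server. A minimal sketch (not part of the patch) of the documented behavior:

    InetSocketAddress bindAddr = new InetSocketAddress("0.0.0.0", 8020); // wildcard listener
    // A wildcard address is rewritten to the local host so clients can connect;
    // a concrete, resolvable address is returned unchanged.
    InetSocketAddress connectAddr = NetUtils.getConnectAddress(bindAddr);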
+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java

@@ -220,6 +220,8 @@ public class SecurityUtil {
   * @return converted Kerberos principal name
   * @throws IOException if the client address cannot be determined
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public static String getServerPrincipal(String principalConfig,
      String hostname) throws IOException {
    String[] components = getComponents(principalConfig);
@@ -245,6 +247,8 @@ public class SecurityUtil {
   * @return converted Kerberos principal name
   * @throws IOException if the client address cannot be determined
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public static String getServerPrincipal(String principalConfig,
      InetAddress addr) throws IOException {
    String[] components = getComponents(principalConfig);
@@ -292,6 +296,8 @@ public class SecurityUtil {
   *          the key to look for user's Kerberos principal name in conf
   * @throws IOException if login fails
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public static void login(final Configuration conf,
      final String keytabFileKey, final String userNameKey) throws IOException {
    login(conf, keytabFileKey, userNameKey, getLocalHostName());
@@ -312,6 +318,8 @@ public class SecurityUtil {
   *          hostname to use for substitution
   * @throws IOException if the config doesn't specify a keytab
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public static void login(final Configuration conf,
      final String keytabFileKey, final String userNameKey, String hostname)
      throws IOException {

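These annotations publish getServerPrincipal and login as public, evolving API. A small illustrative call (hypothetical principal and host, and assuming the conventional _HOST substitution that these methods perform):

    // "_HOST" in the configured principal is replaced with the supplied hostname.
    String principal = SecurityUtil.getServerPrincipal(
        "nn/_HOST@EXAMPLE.COM", "nn1.example.com");
    // expected result: "nn/nn1.example.com@EXAMPLE.COM"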
+ 30 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -69,7 +69,7 @@ import org.apache.hadoop.util.Shell;
  * user's username and groups. It supports both the Windows, Unix and Kerberos 
  * login modules.
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "HBase", "Hive", "Oozie"})
 @InterfaceStability.Evolving
 public class UserGroupInformation {
   private static final Log LOG =  LogFactory.getLog(UserGroupInformation.class);
@@ -258,6 +258,8 @@ public class UserGroupInformation {
   * group look up service.
   * @param conf the configuration to use
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public static void setConfiguration(Configuration conf) {
    initialize(conf, false);
  }
@@ -500,6 +502,8 @@ public class UserGroupInformation {
   * @return the current user
   * @throws IOException if login fails
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public synchronized
  static UserGroupInformation getCurrentUser() throws IOException {
    AccessControlContext context = AccessController.getContext();
@@ -516,6 +520,8 @@ public class UserGroupInformation {
   * @return the logged in user
   * @throws IOException if login fails
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public synchronized 
  static UserGroupInformation getLoginUser() throws IOException {
    if (loginUser == null) {
@@ -652,6 +658,8 @@ public class UserGroupInformation {
   * @param path the path to the keytab file
   * @throws IOException if the keytab file can't be read
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public synchronized
  static void loginUserFromKeytab(String user,
                                  String path
@@ -710,6 +718,8 @@ public class UserGroupInformation {
   * the new credentials.
   * @throws IOException on a failure
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public synchronized void reloginFromKeytab()
  throws IOException {
    if (!isSecurityEnabled() ||
@@ -769,6 +779,8 @@ public class UserGroupInformation {
   * the new credentials.
   * @throws IOException on a failure
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public synchronized void reloginFromTicketCache()
  throws IOException {
    if (!isSecurityEnabled() || 
@@ -867,6 +879,8 @@ public class UserGroupInformation {
   * Did the login happen via keytab
   * @return true or false
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public synchronized static boolean isLoginKeytabBased() throws IOException {
    return getLoginUser().isKeytab;
  }
@@ -877,6 +891,8 @@ public class UserGroupInformation {
   * @param user the full user principal name, must not be empty or null
   * @return the UserGroupInformation for the remote user.
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public static UserGroupInformation createRemoteUser(String user) {
    if (user == null || "".equals(user)) {
      throw new IllegalArgumentException("Null user");
@@ -891,6 +907,7 @@ public class UserGroupInformation {
  /**
   * existing types of authentications' methods
   */
+  @InterfaceAudience.Public
  @InterfaceStability.Evolving
  public static enum AuthenticationMethod {
    SIMPLE,
@@ -908,6 +925,8 @@ public class UserGroupInformation {
   * @param realUser
   * @return proxyUser ugi
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public static UserGroupInformation createProxyUser(String user,
      UserGroupInformation realUser) {
    if (user == null || "".equals(user)) {
@@ -929,6 +948,8 @@ public class UserGroupInformation {
   * get RealUser (vs. EffectiveUser)
   * @return realUser running over proxy user
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public UserGroupInformation getRealUser() {
    for (RealUser p: subject.getPrincipals(RealUser.class)) {
      return p.getRealUser();
@@ -974,7 +995,8 @@ public class UserGroupInformation {
   * @param userGroups the names of the groups that the user belongs to
   * @return a fake user for running unit tests
   */
-  @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public static UserGroupInformation createUserForTesting(String user, 
                                                          String[] userGroups) {
    ensureInitialized();
@@ -1000,7 +1022,6 @@ public class UserGroupInformation {
   *          the names of the groups that the user belongs to
   * @return a fake user for running unit tests
   */
-  @InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
  public static UserGroupInformation createProxyUserForTesting(String user,
      UserGroupInformation realUser, String[] userGroups) {
    ensureInitialized();
@@ -1029,6 +1050,8 @@ public class UserGroupInformation {
   * Get the user's full principal name.
   * @return the user's full principal name.
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public String getUserName() {
    return user.getName();
  }
@@ -1182,6 +1205,8 @@ public class UserGroupInformation {
   * @param action the method to execute
   * @return the value from the run method
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public <T> T doAs(PrivilegedAction<T> action) {
    logPrivilegedAction(subject, action);
    return Subject.doAs(subject, action);
@@ -1198,6 +1223,8 @@ public class UserGroupInformation {
   * @throws InterruptedException if the action throws an InterruptedException
   * @throws UndeclaredThrowableException if the action throws something else
   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
  public <T> T doAs(PrivilegedExceptionAction<T> action
                    ) throws IOException, InterruptedException {
    try {

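The widened audience annotations make the impersonation entry points above public API. A brief sketch (hypothetical user and action, not from this patch) of the createRemoteUser/doAs pattern they expose:

    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        // executed with "alice" as the effective user
        return FileSystem.get(new Configuration());
      }
    });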
+ 6 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/DelegationKey.java

@@ -41,6 +41,7 @@ public class DelegationKey implements Writable {
  private long expiryDate;
  @Nullable
  private byte[] keyBytes = null;
+  private static final int MAX_KEY_LEN = 1024 * 1024;

  /** Default constructor required for Writable */
  public DelegationKey() {
@@ -55,6 +56,10 @@ public class DelegationKey implements Writable {
    this.keyId = keyId;
    this.expiryDate = expiryDate;
    if (encodedKey != null) {
+      if (encodedKey.length > MAX_KEY_LEN) {
+        throw new RuntimeException("can't create " + encodedKey.length +
+            " byte long DelegationKey.");
+      }
      this.keyBytes = encodedKey;
    }
  }
@@ -102,7 +107,7 @@ public class DelegationKey implements Writable {
  public void readFields(DataInput in) throws IOException {
    keyId = WritableUtils.readVInt(in);
    expiryDate = WritableUtils.readVLong(in);
-    int len = WritableUtils.readVInt(in);
+    int len = WritableUtils.readVIntInRange(in, -1, MAX_KEY_LEN);
    if (len == -1) {
      keyBytes = null;
    } else {

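The bounded read protects readFields from corrupt or hostile key lengths. A minimal sketch of the pattern (assuming readVIntInRange rejects out-of-range values with an IOException, and that the remaining bytes are read with readFully as in the original readFields):

    int len = WritableUtils.readVIntInRange(in, -1, 1024 * 1024); // -1 encodes "no key"
    byte[] keyBytes = (len == -1) ? null : new byte[len];
    if (keyBytes != null) {
      in.readFully(keyBytes);
    }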
+ 28 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java

@@ -21,8 +21,10 @@ package org.apache.hadoop.util;
 import java.io.DataInput;
 import java.io.IOException;
 
+import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.UserInformationProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.*;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -135,4 +137,30 @@ public abstract class ProtoUtil {
    }
    return ugi;
  }
+  
+  static RpcKindProto convert(RPC.RpcKind kind) {
+    switch (kind) {
+    case RPC_BUILTIN: return RpcKindProto.RPC_BUILTIN;
+    case RPC_WRITABLE: return RpcKindProto.RPC_WRITABLE;
+    case RPC_PROTOCOL_BUFFER: return RpcKindProto.RPC_PROTOCOL_BUFFER;
+    }
+    return null;
+  }
+  
+  
+  public static RPC.RpcKind convert( RpcKindProto kind) {
+    switch (kind) {
+    case RPC_BUILTIN: return RPC.RpcKind.RPC_BUILTIN;
+    case RPC_WRITABLE: return RPC.RpcKind.RPC_WRITABLE;
+    case RPC_PROTOCOL_BUFFER: return RPC.RpcKind.RPC_PROTOCOL_BUFFER;
+    }
+    return null;
+  }
+ 
+  public static RpcPayloadHeaderProto makeRpcPayloadHeader(RPC.RpcKind rpcKind,
+      RpcPayloadOperationProto operation, int callId) {
+    RpcPayloadHeaderProto.Builder result = RpcPayloadHeaderProto.newBuilder();
+    result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId);
+    return result.build();
+  }
 }

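With the payload header now a protobuf message, a client can build it with makeRpcPayloadHeader and write it length-delimited, which is exactly what Server.processData reads back with parseDelimitedFrom. A minimal round-trip sketch (not part of the patch):

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    RpcPayloadHeaderProto header = ProtoUtil.makeRpcPayloadHeader(
        RPC.RpcKind.RPC_PROTOCOL_BUFFER,
        RpcPayloadOperationProto.RPC_FINAL_PAYLOAD,
        42);                              // callId, arbitrary example value
    header.writeDelimitedTo(bytes);       // length-delimited, matching the wire format

    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray()));
    RpcPayloadHeaderProto decoded = RpcPayloadHeaderProto.parseDelimitedFrom(in);
    // decoded.getCallId() == 42, decoded.getRpcKind() == RpcKindProto.RPC_PROTOCOL_BUFFER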
+ 10 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java

@@ -50,6 +50,11 @@ public class RunJar {
  /** Pattern that matches any string */
  public static final Pattern MATCH_ANY = Pattern.compile(".*");

+  /**
+   * Priority of the RunJar shutdown hook.
+   */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 10;
+
  /**
   * Unpack a jar file into a directory.
   *
@@ -167,11 +172,14 @@ public class RunJar {
    }
    ensureDirectory(workDir);

-    Runtime.getRuntime().addShutdownHook(new Thread() {
+    ShutdownHookManager.get().addShutdownHook(
+      new Runnable() {
+        @Override
        public void run() {
          FileUtil.fullyDelete(workDir);
        }
-      });
+      }, SHUTDOWN_HOOK_PRIORITY);
+

    unJar(file, workDir);


+ 0 - 58
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java

@@ -81,64 +81,6 @@ abstract public class Shell {
  /** Whether or not the script timed out */
  private AtomicBoolean timedOut;

-  /** a Unix command to get ulimit of a process. */
-  public static final String ULIMIT_COMMAND = "ulimit";
-  
-  /** 
-   * Get the Unix command for setting the maximum virtual memory available
-   * to a given child process. This is only relevant when we are forking a
-   * process from within the Mapper or the Reducer implementations.
-   * Also see Hadoop Pipes and Hadoop Streaming.
-   * 
-   * It also checks to ensure that we are running on a *nix platform else 
-   * (e.g. in Cygwin/Windows) it returns <code>null</code>.
-   * @param memoryLimit virtual memory limit
-   * @return a <code>String[]</code> with the ulimit command arguments or 
-   *         <code>null</code> if we are running on a non *nix platform or
-   *         if the limit is unspecified.
-   */
-  public static String[] getUlimitMemoryCommand(int memoryLimit) {
-    // ulimit isn't supported on Windows
-    if (WINDOWS) {
-      return null;
-    }
-    
-    return new String[] {ULIMIT_COMMAND, "-v", String.valueOf(memoryLimit)};
-  }
-  
-  /** 
-   * Get the Unix command for setting the maximum virtual memory available
-   * to a given child process. This is only relevant when we are forking a
-   * process from within the Mapper or the Reducer implementations.
-   * see also Hadoop Pipes and Streaming.
-   * 
-   * It also checks to ensure that we are running on a *nix platform else 
-   * (e.g. in Cygwin/Windows) it returns <code>null</code>.
-   * @param conf configuration
-   * @return a <code>String[]</code> with the ulimit command arguments or 
-   *         <code>null</code> if we are running on a non *nix platform or
-   *         if the limit is unspecified.
-   * @deprecated Use {@link #getUlimitMemoryCommand(int)}
-   */
-  @Deprecated
-  public static String[] getUlimitMemoryCommand(Configuration conf) {
-    // ulimit isn't supported on Windows
-    if (WINDOWS) {
-      return null;
-    }
-    
-    // get the memory limit from the configuration
-    String ulimit = conf.get("mapred.child.ulimit");
-    if (ulimit == null) {
-      return null;
-    }
-    
-    // Parse it to ensure it is legal/sane
-    int memoryLimit = Integer.valueOf(ulimit);
-    
-    return getUlimitMemoryCommand(memoryLimit);
-  }
-  
  /** Set to true on Windows platforms */
  public static final boolean WINDOWS /* borrowed from Path.WINDOWS */
                = System.getProperty("os.name").startsWith("Windows");

+ 181 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java

@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * The <code>ShutdownHookManager</code> enables running shutdownHooks
+ * in a deterministic order, higher priority first.
+ * <p/>
+ * The JVM runs ShutdownHooks in a non-deterministic order or in parallel.
+ * This class registers a single JVM shutdownHook and run all the
+ * shutdownHooks registered to it (to this class) in order based on their
+ * priority.
+ */
+public class ShutdownHookManager {
+
+  private static final ShutdownHookManager MGR = new ShutdownHookManager();
+
+  private static final Log LOG = LogFactory.getLog(ShutdownHookManager.class);
+
+  static {
+    Runtime.getRuntime().addShutdownHook(
+      new Thread() {
+        @Override
+        public void run() {
+          MGR.shutdownInProgress.set(true);
+          for (Runnable hook: MGR.getShutdownHooksInOrder()) {
+            try {
+              hook.run();
+            } catch (Throwable ex) {
+              LOG.warn("ShutdownHook '" + hook.getClass().getSimpleName() +
+                       "' failed, " + ex.toString(), ex);
+            }
+          }
+        }
+      }
+    );
+  }
+
+  /**
+   * Return <code>ShutdownHookManager</code> singleton.
+   *
+   * @return <code>ShutdownHookManager</code> singleton.
+   */
+  public static ShutdownHookManager get() {
+    return MGR;
+  }
+
+  /**
+   * Private structure to store ShutdownHook and its priority.
+   */
+  private static class HookEntry {
+    Runnable hook;
+    int priority;
+
+    public HookEntry(Runnable hook, int priority) {
+      this.hook = hook;
+      this.priority = priority;
+    }
+
+    @Override
+    public int hashCode() {
+      return hook.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      boolean eq = false;
+      if (obj != null) {
+        if (obj instanceof HookEntry) {
+          eq = (hook == ((HookEntry)obj).hook);
+        }
+      }
+      return eq;
+    }
+
+  }
+
+  private Set<HookEntry> hooks =
+    Collections.synchronizedSet(new HashSet<HookEntry>());
+
+  private AtomicBoolean shutdownInProgress = new AtomicBoolean(false);
+
+  //private constructor to ensure a single instance
+  private ShutdownHookManager() {
+  }
+
+  /**
+   * Returns the list of shutdownHooks in order of execution,
+   * Highest priority first.
+   *
+   * @return the list of shutdownHooks in order of execution.
+   */
+  List<Runnable> getShutdownHooksInOrder() {
+    List<HookEntry> list;
+    synchronized (MGR.hooks) {
+      list = new ArrayList<HookEntry>(MGR.hooks);
+    }
+    Collections.sort(list, new Comparator<HookEntry>() {
+
+      //reversing comparison so highest priority hooks are first
+      @Override
+      public int compare(HookEntry o1, HookEntry o2) {
+        return o2.priority - o1.priority;
+      }
+    });
+    List<Runnable> ordered = new ArrayList<Runnable>();
+    for (HookEntry entry: list) {
+      ordered.add(entry.hook);
+    }
+    return ordered;
+  }
+
+  /**
+   * Adds a shutdownHook with a priority; the higher the priority,
+   * the earlier it will run. ShutdownHooks with the same priority run
+   * in a non-deterministic order.
+   *
+   * @param shutdownHook shutdownHook <code>Runnable</code>
+   * @param priority priority of the shutdownHook.
+   */
+  public void addShutdownHook(Runnable shutdownHook, int priority) {
+    if (shutdownHook == null) {
+      throw new IllegalArgumentException("shutdownHook cannot be NULL");
+    }
+    if (shutdownInProgress.get()) {
+      throw new IllegalStateException("Shutdown in progress, cannot add a shutdownHook");
+    }
+    hooks.add(new HookEntry(shutdownHook, priority));
+  }
+
+  /**
+   * Removes a shutdownHook.
+   *
+   * @param shutdownHook shutdownHook to remove.
+   * @return TRUE if the shutdownHook was registered and removed,
+   * FALSE otherwise.
+   */
+  public boolean removeShutdownHook(Runnable shutdownHook) {
+    if (shutdownInProgress.get()) {
+      throw new IllegalStateException("Shutdown in progress, cannot remove a shutdownHook");
+    }
+    return hooks.remove(new HookEntry(shutdownHook, 0));
+  }
+
+  /**
+   * Indicates if a shutdownHook is registered or not.
+   *
+   * @param shutdownHook shutdownHook to check if registered.
+   * @return TRUE/FALSE depending on whether the shutdownHook is registered.
+   */
+  public boolean hasShutdownHook(Runnable shutdownHook) {
+    return hooks.contains(new HookEntry(shutdownHook, 0));
+  }
+
+}

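A small usage sketch (hypothetical hook, not part of the patch) of the registration pattern that RunJar and StringUtils adopt elsewhere in this change; hooks with higher priority run first, ties in unspecified order:

    ShutdownHookManager.get().addShutdownHook(new Runnable() {
      @Override
      public void run() {
        System.out.println("releasing temporary resources"); // hypothetical cleanup
      }
    }, 10); // same priority value used by RunJar.SHUTDOWN_HOOK_PRIORITY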
+ 14 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java

@@ -46,6 +46,11 @@ import org.apache.hadoop.net.NetUtils;
 @InterfaceStability.Unstable
 public class StringUtils {

+  /**
+   * Priority of the StringUtils shutdown hook.
+   */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 0;
+
  private static final DecimalFormat decimalFormat;
  static {
          NumberFormat numberFormat = NumberFormat.getNumberInstance(Locale.ENGLISH);
@@ -600,12 +605,15 @@ public class StringUtils {
        )
      );

-    Runtime.getRuntime().addShutdownHook(new Thread() {
-      public void run() {
-        LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
-          "Shutting down " + classname + " at " + hostname}));
-      }
-    });
+    ShutdownHookManager.get().addShutdownHook(
+      new Runnable() {
+        @Override
+        public void run() {
+          LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
+            "Shutting down " + classname + " at " + hostname}));
+        }
+      }, SHUTDOWN_HOOK_PRIORITY);
+
  }

  /**

+ 0 - 5
hadoop-common-project/hadoop-common/src/main/packages/templates/conf/mapred-site.xml

@@ -108,11 +108,6 @@
    <value>-server -Xmx640m -Djava.net.preferIPv4Stack=true</value>
  </property>

-  <property>
-    <name>mapred.child.ulimit</name>
-    <value>8388608</value>
-  </property>
-
  <property>
    <name>mapred.job.tracker.persist.jobstatus.active</name>
    <value>true</value>

+ 58 - 0
hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+option java_package = "org.apache.hadoop.ipc.protobuf";
+option java_outer_classname = "RpcPayloadHeaderProtos";
+option java_generate_equals_and_hash = true;
+
+
+/**
+ * This is the rpc payload header. It is sent with every rpc call.
+ * 
+ * The format of RPC call is as follows:
+ * +-----------------------------------------------------+
+ * |  Rpc length in bytes                                |
+ * +-----------------------------------------------------+
+ * | RpcPayloadHeader - serialized delimited ie has len  |
+ * +-----------------------------------------------------+
+ * |  RpcRequest Payload                                 |
+ * +-----------------------------------------------------+
+ *
+ */
+
+
+
+/**
+ * RpcKind determines the rpcEngine and the serialization of the rpc payload
+ */
+enum RpcKindProto {
+  RPC_BUILTIN          = 0;  // Used for built in calls by tests
+  RPC_WRITABLE         = 1;  // Use WritableRpcEngine 
+  RPC_PROTOCOL_BUFFER  = 2;  // Use ProtobufRpcEngine
+}
+
+enum RpcPayloadOperationProto {
+  RPC_FINAL_PAYLOAD        = 0; // The final payload
+  RPC_CONTINUATION_PAYLOAD = 1; // not implemented yet
+  RPC_CLOSE_CONNECTION     = 2; // close the rpc connection
+}
+   
+message RpcPayloadHeaderProto { // the header for the RpcRequest
+  optional RpcKindProto rpcKind = 1;
+  optional RpcPayloadOperationProto rpcOp = 2;
+  optional uint32 callId = 3; // each rpc has a callId that is also used in response
+}

+ 7 - 6
dev-support/test-patch.properties → hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem

@@ -13,9 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# The number of acceptable warning for *all* modules
-# Please update the per-module test-patch.properties if you update this file.
-
-OK_RELEASEAUDIT_WARNINGS=0
-OK_FINDBUGS_WARNINGS=0
-OK_JAVADOC_WARNINGS=8
+org.apache.hadoop.fs.LocalFileSystem
+org.apache.hadoop.fs.viewfs.ViewFileSystem
+org.apache.hadoop.fs.s3.S3FileSystem
+org.apache.hadoop.fs.s3native.NativeS3FileSystem
+org.apache.hadoop.fs.kfs.KosmosFileSystem
+org.apache.hadoop.fs.ftp.FTPFileSystem
+org.apache.hadoop.fs.HarFileSystem
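 
Registering the concrete FileSystem implementations in this META-INF/services file is what allows the fs.<scheme>.impl defaults to be dropped from core-default.xml later in this change: the classes are discovered at runtime through java.util.ServiceLoader. A minimal, illustrative sketch of that discovery pattern (not the actual FileSystem code; it assumes each listed class reports its scheme via the new getScheme() method):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.ServiceLoader;
    import org.apache.hadoop.fs.FileSystem;

    public class FsServiceLoaderSketch {
      /** Maps each discovered scheme to the FileSystem class that serves it. */
      public static Map<String, Class<? extends FileSystem>> discover() {
        Map<String, Class<? extends FileSystem>> byScheme =
            new HashMap<String, Class<? extends FileSystem>>();
        // ServiceLoader instantiates every class named in
        // META-INF/services/org.apache.hadoop.fs.FileSystem on the classpath.
        for (FileSystem fs : ServiceLoader.load(FileSystem.class)) {
          byScheme.put(fs.getScheme(), fs.getClass());
        }
        return byScheme;
      }
    }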

+ 20 - 0
hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.io.compress.CompressionCodec

@@ -0,0 +1,20 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+org.apache.hadoop.io.compress.BZip2Codec
+org.apache.hadoop.io.compress.DefaultCodec
+org.apache.hadoop.io.compress.DeflateCodec
+org.apache.hadoop.io.compress.GzipCodec
+org.apache.hadoop.io.compress.Lz4Codec
+org.apache.hadoop.io.compress.SnappyCodec
+
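 
The same ServiceLoader mechanism now covers compression codecs, so CompressionCodecFactory can resolve a codec from a file extension even when io.compression.codecs is left unset (see the core-default.xml change below). A small usage sketch, assuming the default service file above is on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.CompressionCodecFactory;

    public class CodecLookupSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(); // io.compression.codecs left unset
        CompressionCodecFactory factory = new CompressionCodecFactory(conf);
        // Resolved via the ServiceLoader-registered codecs, e.g. GzipCodec for .gz
        CompressionCodec codec = factory.getCodec(new Path("/tmp/data.gz"));
        System.out.println(codec == null ? "no codec" : codec.getClass().getName());
      }
    }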

+ 5 - 73
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -268,9 +268,11 @@
 
 <property>
   <name>io.compression.codecs</name>
-  <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.DeflateCodec,org.apache.hadoop.io.compress.SnappyCodec,org.apache.hadoop.io.compress.Lz4Codec</value>
-  <description>A list of the compression codec classes that can be used 
-               for compression/decompression.</description>
+  <value></value>
+  <description>A comma-separated list of the compression codec classes that can
+  be used for compression/decompression. In addition to any classes specified
+  with this property (which take precedence), codec classes on the classpath
+  are discovered using a Java ServiceLoader.</description>
 </property>
 
 <property>
@@ -350,25 +352,6 @@
   </description>
 </property>
 
-<property>
-  <name>fs.file.impl</name>
-  <value>org.apache.hadoop.fs.LocalFileSystem</value>
-  <description>The FileSystem for file: uris.</description>
-</property>
-
-<property>
-  <name>fs.hdfs.impl</name>
-  <value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
-  <description>The FileSystem for hdfs: uris.</description>
-</property>
-
-<property>
-  <name>fs.viewfs.impl</name>
-  <value>org.apache.hadoop.fs.viewfs.ViewFileSystem</value>
-  <description>The FileSystem for view file system for viewfs: uris
-  (ie client side mount table:).</description>
-</property>
-
 <property>
   <name>fs.AbstractFileSystem.file.impl</name>
   <value>org.apache.hadoop.fs.local.LocalFs</value>
@@ -389,45 +372,6 @@
   (ie client side mount table:).</description>
 </property>
 
-<property>
-  <name>fs.s3.impl</name>
-  <value>org.apache.hadoop.fs.s3.S3FileSystem</value>
-  <description>The FileSystem for s3: uris.</description>
-</property>
-
-<property>
-  <name>fs.s3n.impl</name>
-  <value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value>
-  <description>The FileSystem for s3n: (Native S3) uris.</description>
-</property>
-
-<property>
-  <name>fs.kfs.impl</name>
-  <value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value>
-  <description>The FileSystem for kfs: uris.</description>
-</property>
-
-<property>
-  <name>fs.hftp.impl</name>
-  <value>org.apache.hadoop.hdfs.HftpFileSystem</value>
-</property>
-
-<property>
-  <name>fs.hsftp.impl</name>
-  <value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
-</property>
-
-<property>
-  <name>fs.webhdfs.impl</name>
-  <value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value>
-</property>
-
-<property>
-  <name>fs.ftp.impl</name>
-  <value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>
-  <description>The FileSystem for ftp: uris.</description>
-</property>
-
 <property>
   <name>fs.ftp.host</name>
   <value>0.0.0.0</value>
@@ -442,18 +386,6 @@
   </description>
 </property>
 
-<property>
-  <name>fs.har.impl</name>
-  <value>org.apache.hadoop.fs.HarFileSystem</value>
-  <description>The filesystem for Hadoop archives. </description>
-</property>
-
-<property>
-  <name>fs.har.impl.disable.cache</name>
-  <value>true</value>
-  <description>Don't cache 'har' filesystem instances.</description>
-</property>
-
 <property>
   <name>fs.df.interval</name>
   <value>60000</value>
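 
With the per-scheme fs.*.impl defaults gone, the scheme-to-class mapping normally comes from the ServiceLoader registration above, while an explicit fs.<scheme>.impl entry in the Configuration still takes precedence; the updated caching tests later in this change rely on exactly that when they register their "cachedfile" scheme. A hedged sketch of both lookups (the "cachedfile" scheme here is only an example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class SchemeLookupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // No fs.file.impl default any more: the "file" scheme is resolved via ServiceLoader.
        Class<? extends FileSystem> local = FileSystem.getFileSystemClass("file", conf);
        // An explicit fs.<scheme>.impl entry still wins for that scheme.
        conf.set("fs.cachedfile.impl", local.getName());
        System.out.println(local.getName());
      }
    }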

+ 0 - 4
hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm

@@ -314,8 +314,6 @@ Deprecated Properties
 *---+---+
 |mapred.map.child.log.level | mapreduce.map.log.level
 *---+---+
-|mapred.map.child.ulimit | mapreduce.map.ulimit
-*---+---+
 |mapred.map.max.attempts | mapreduce.map.maxattempts
 *---+---+
 |mapred.map.output.compression.codec | mapreduce.map.output.compress.codec
@@ -378,8 +376,6 @@ Deprecated Properties
 *---+---+
 |mapred.reduce.child.log.level | mapreduce.reduce.log.level
 *---+---+
-|mapred.reduce.child.ulimit | mapreduce.reduce.ulimit
-*---+---+
 |mapred.reduce.max.attempts | mapreduce.reduce.maxattempts
 *---+---+
 |mapred.reduce.parallel.copies | mapreduce.reduce.shuffle.parallelcopies

+ 22 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java

@@ -23,6 +23,7 @@ import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.io.StringWriter;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -671,6 +672,27 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  public void testSetSocketAddress() throws IOException {
+    Configuration conf = new Configuration();
+    NetUtils.addStaticResolution("host", "127.0.0.1");
+    final String defaultAddr = "host:1";
+    
+    InetSocketAddress addr = NetUtils.createSocketAddr(defaultAddr);    
+    conf.setSocketAddr("myAddress", addr);
+    assertEquals(defaultAddr, NetUtils.getHostPortString(addr));
+  }
+  
+  public void testUpdateSocketAddress() throws IOException {
+    InetSocketAddress addr = NetUtils.createSocketAddrForHost("host", 1);
+    InetSocketAddress connectAddr = conf.updateConnectAddr("myAddress", addr);
+    assertEquals(connectAddr.getHostName(), addr.getHostName());
+    
+    addr = new InetSocketAddress(1);
+    connectAddr = conf.updateConnectAddr("myAddress", addr);
+    assertEquals(connectAddr.getHostName(),
+                 InetAddress.getLocalHost().getHostName());
+  }
+
   public void testReload() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java

@@ -164,7 +164,7 @@ public class TestConfigurationDeprecation {
     conf.set("Y", "y");
     conf.set("Y", "y");
     conf.set("Z", "z");
     conf.set("Z", "z");
     // get old key
     // get old key
-    assertEquals("y", conf.get("X"));
+    assertEquals("z", conf.get("X"));
   }
   }
 
 
   /**
   /**
@@ -305,7 +305,7 @@ public class TestConfigurationDeprecation {
     assertTrue("deprecated Key not found", dKFound);
     assertTrue("deprecated Key not found", dKFound);
     assertTrue("new Key not found", nKFound);
     assertTrue("new Key not found", nKFound);
   }
   }
-
+  
   @Test
   @Test
   public void testUnsetWithDeprecatedKeys() {
   public void testUnsetWithDeprecatedKeys() {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();

+ 50 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java

@@ -18,10 +18,15 @@
 
 package org.apache.hadoop.conf;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.ByteArrayOutputStream;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.junit.Test;
 
 import junit.framework.TestCase;
 
@@ -53,4 +58,49 @@ public class TestDeprecatedKeys extends TestCase {
     assertTrue(fileContents.contains("old.config.yet.to.be.deprecated"));
     assertTrue(fileContents.contains("new.conf.to.replace.deprecated.conf"));
   }
+  
+  @Test
+  public void testIteratorWithDeprecatedKeysMappedToMultipleNewKeys() {
+    Configuration conf = new Configuration();
+    Configuration.addDeprecation("dK", new String[]{"nK1", "nK2"});
+    conf.set("k", "v");
+    conf.set("dK", "V");
+    assertEquals("V", conf.get("dK"));
+    assertEquals("V", conf.get("nK1"));
+    assertEquals("V", conf.get("nK2"));
+    conf.set("nK1", "VV");
+    assertEquals("VV", conf.get("dK"));
+    assertEquals("VV", conf.get("nK1"));
+    assertEquals("VV", conf.get("nK2"));
+    conf.set("nK2", "VVV");
+    assertEquals("VVV", conf.get("dK"));
+    assertEquals("VVV", conf.get("nK2"));
+    assertEquals("VVV", conf.get("nK1"));
+    boolean kFound = false;
+    boolean dKFound = false;
+    boolean nK1Found = false;
+    boolean nK2Found = false;
+    for (Map.Entry<String, String> entry : conf) {
+      if (entry.getKey().equals("k")) {
+        assertEquals("v", entry.getValue());
+        kFound = true;
+      }
+      if (entry.getKey().equals("dK")) {
+        assertEquals("VVV", entry.getValue());
+        dKFound = true;
+      }
+      if (entry.getKey().equals("nK1")) {
+        assertEquals("VVV", entry.getValue());
+        nK1Found = true;
+      }
+      if (entry.getKey().equals("nK2")) {
+        assertEquals("VVV", entry.getValue());
+        nK2Found = true;
+      }
+    }
+    assertTrue("regular Key not found", kFound);
+    assertTrue("deprecated Key not found", dKFound);
+    assertTrue("new Key 1 not found", nK1Found);
+    assertTrue("new Key 2 not found", nK2Found);
+  }
 }

+ 176 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java

@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Test;
+
+public class TestAfsCheckPath {
+  
+  private static int DEFAULT_PORT = 1234;
+  private static int OTHER_PORT = 4321;
+  
+  @Test
+  public void testCheckPathWithNoPorts() throws URISyntaxException {
+    URI uri = new URI("dummy://dummy-host");
+    AbstractFileSystem afs = new DummyFileSystem(uri);
+    afs.checkPath(new Path("dummy://dummy-host"));
+  }
+  
+  @Test
+  public void testCheckPathWithDefaultPort() throws URISyntaxException {
+    URI uri = new URI("dummy://dummy-host:" + DEFAULT_PORT);
+    AbstractFileSystem afs = new DummyFileSystem(uri);
+    afs.checkPath(new Path("dummy://dummy-host:" + DEFAULT_PORT));
+  }
+  
+  @Test
+  public void testCheckPathWithTheSameNonDefaultPort()
+      throws URISyntaxException {
+    URI uri = new URI("dummy://dummy-host:" + OTHER_PORT);
+    AbstractFileSystem afs = new DummyFileSystem(uri);
+    afs.checkPath(new Path("dummy://dummy-host:" + OTHER_PORT));
+  }
+  
+  @Test(expected=InvalidPathException.class)
+  public void testCheckPathWithDifferentPorts() throws URISyntaxException {
+    URI uri = new URI("dummy://dummy-host:" + DEFAULT_PORT);
+    AbstractFileSystem afs = new DummyFileSystem(uri);
+    afs.checkPath(new Path("dummy://dummy-host:" + OTHER_PORT));
+  }
+  
+  private static class DummyFileSystem extends AbstractFileSystem {
+    
+    public DummyFileSystem(URI uri) throws URISyntaxException {
+      super(uri, "dummy", true, DEFAULT_PORT);
+    }
+    
+    @Override
+    public int getUriDefaultPort() {
+      return DEFAULT_PORT;
+    }
+
+    @Override
+    public FSDataOutputStream createInternal(Path f, EnumSet<CreateFlag> flag,
+        FsPermission absolutePermission, int bufferSize, short replication,
+        long blockSize, Progressable progress, int bytesPerChecksum,
+        boolean createParent) throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public boolean delete(Path f, boolean recursive)
+        throws AccessControlException, FileNotFoundException,
+        UnresolvedLinkException, IOException {
+      // deliberately empty
+      return false;
+    }
+
+    @Override
+    public BlockLocation[] getFileBlockLocations(Path f, long start, long len)
+        throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public FileChecksum getFileChecksum(Path f) throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public FileStatus getFileStatus(Path f) throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public FsStatus getFsStatus() throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public FsServerDefaults getServerDefaults() throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public FileStatus[] listStatus(Path f) throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public void mkdir(Path dir, FsPermission permission, boolean createParent)
+        throws IOException {
+      // deliberately empty
+    }
+
+    @Override
+    public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public void renameInternal(Path src, Path dst) throws IOException {
+      // deliberately empty
+    }
+
+    @Override
+    public void setOwner(Path f, String username, String groupname)
+        throws IOException {
+      // deliberately empty
+    }
+
+    @Override
+    public void setPermission(Path f, FsPermission permission)
+        throws IOException {
+      // deliberately empty
+    }
+
+    @Override
+    public boolean setReplication(Path f, short replication) throws IOException {
+      // deliberately empty
+      return false;
+    }
+
+    @Override
+    public void setTimes(Path f, long mtime, long atime) throws IOException {
+      // deliberately empty
+    }
+
+    @Override
+    public void setVerifyChecksum(boolean verifyChecksum) throws IOException {
+      // deliberately empty
+    }
+    
+  }
+}

+ 3 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.Set;
 
 import junit.framework.Assert;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -65,7 +66,7 @@
     checkDeleteOnExitData(1, fc, file1);
     
     // Ensure shutdown hook is added
-    Assert.assertTrue(Runtime.getRuntime().removeShutdownHook(FileContext.FINALIZER));
+    Assert.assertTrue(ShutdownHookManager.get().hasShutdownHook(FileContext.FINALIZER));
     
     Path file2 = getTestRootPath(fc, "dir1/file2");
     createFile(fc, file2, numBlocks, blockSize);
@@ -79,8 +80,7 @@
     
     // trigger deleteOnExit and ensure the registered
     // paths are cleaned up
-    FileContext.FINALIZER.start();
-    FileContext.FINALIZER.join();
+    FileContext.FINALIZER.run();
     checkDeleteOnExitData(0, fc, new Path[0]);
     Assert.assertFalse(exists(fc, file1));
     Assert.assertFalse(exists(fc, file2));

+ 7 - 7
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java

@@ -43,7 +43,7 @@ public class TestFileSystemCaching {
   @Test
   public void testCacheEnabled() throws Exception {
     Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     FileSystem fs1 = FileSystem.get(new URI("cachedfile://a"), conf);
     FileSystem fs2 = FileSystem.get(new URI("cachedfile://a"), conf);
     assertSame(fs1, fs2);
@@ -84,7 +84,7 @@
     // wait for InitializeForeverFileSystem to start initialization
     InitializeForeverFileSystem.sem.acquire();
     
-    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     FileSystem.get(new URI("cachedfile://a"), conf);
     t.interrupt();
     t.join();
@@ -93,7 +93,7 @@
   @Test
   public void testCacheDisabled() throws Exception {
     Configuration conf = new Configuration();
-    conf.set("fs.uncachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.uncachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     conf.setBoolean("fs.uncachedfile.impl.disable.cache", true);
     FileSystem fs1 = FileSystem.get(new URI("uncachedfile://a"), conf);
     FileSystem fs2 = FileSystem.get(new URI("uncachedfile://a"), conf);
@@ -104,7 +104,7 @@
   @Test
   public <T extends TokenIdentifier> void testCacheForUgi() throws Exception {
     final Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
     UserGroupInformation ugiB = UserGroupInformation.createRemoteUser("bar");
     FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
@@ -156,7 +156,7 @@
   @Test
   public void testUserFS() throws Exception {
     final Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     FileSystem fsU1 = FileSystem.get(new URI("cachedfile://a"), conf, "bar");
     FileSystem fsU2 = FileSystem.get(new URI("cachedfile://a"), conf, "foo");
     
@@ -166,7 +166,7 @@
   @Test
   public void testFsUniqueness() throws Exception {
     final Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     // multiple invocations of FileSystem.get return the same object.
     FileSystem fs1 = FileSystem.get(conf);
     FileSystem fs2 = FileSystem.get(conf);
@@ -183,7 +183,7 @@
   @Test
   public void testCloseAllForUGI() throws Exception {
     final Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
     FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
       public FileSystem run() throws Exception {

+ 4 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java

@@ -165,7 +165,10 @@ public class TestFilterFileSystem {
     public Token<?> getDelegationToken(String renewer) throws IOException {
       return null;
     }
-    
+
+    public String getScheme() {
+      return "dontcheck";
+    }
   }
   
   @Test

+ 13 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java

@@ -99,6 +99,19 @@ public class HttpServerFunctionalTest extends Assert {
     }
   }
 
+  /**
+   * Create an HttpServer instance on the given address for the given webapp
+   * @param host to bind
+   * @param port to bind
+   * @return the server
+   * @throws IOException if it could not be created
+   */
+  public static HttpServer createServer(String host, int port)
+      throws IOException {
+    prepareTestWebapp();
+    return new HttpServer(TEST, host, port, true);
+  }
+
   /**
    * Create an HttpServer instance for the given webapp
    * @param webapp the webapp to work with

+ 96 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.http;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
 import java.net.URL;
 import java.util.Arrays;
 import java.util.Enumeration;
@@ -35,6 +36,7 @@ import java.util.concurrent.Executors;
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
@@ -53,10 +55,12 @@ import org.apache.hadoop.http.HttpServer.QuotingInputFilter.RequestQuoter;
 import org.apache.hadoop.http.resource.JerseyResource;
 import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.Mock;
 import org.mockito.Mockito;
 import org.mortbay.util.ajax.JSON;
 
@@ -422,4 +426,96 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     assertEquals("bar", m.get(JerseyResource.OP));
     LOG.info("END testJersey()");
   }
+
+  @Test
+  public void testHasAdministratorAccess() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false);
+    ServletContext context = Mockito.mock(ServletContext.class);
+    Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
+    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(null);
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(request.getRemoteUser()).thenReturn(null);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+
+    //authorization OFF
+    Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+
+    //authorization ON & user NULL
+    response = Mockito.mock(HttpServletResponse.class);
+    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
+    Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response));
+    Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString());
+
+    //authorization ON & user NOT NULL & ACLs NULL
+    response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(request.getRemoteUser()).thenReturn("foo");
+    Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+
+    //authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs
+    response = Mockito.mock(HttpServletResponse.class);
+    AccessControlList acls = Mockito.mock(AccessControlList.class);
+    Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
+    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+    Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response));
+    Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString());
+
+    //authorization ON & user NOT NULL & ACLs NOT NULL & user in ACLs
+    response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(true);
+    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+    Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+
+  }
+
+  @Test public void testBindAddress() throws Exception {
+    checkBindAddress("0.0.0.0", 0, false).stop();
+    // hang onto this one for a bit more testing
+    HttpServer myServer = checkBindAddress("localhost", 0, false);
+    HttpServer myServer2 = null;
+    try { 
+      int port = myServer.getListenerAddress().getPort();
+      // it's already in use, true = expect a higher port
+      myServer2 = checkBindAddress("localhost", port, true);
+      // try to reuse the port
+      port = myServer2.getListenerAddress().getPort();
+      myServer2.stop();
+      assertEquals(-1, myServer2.getPort()); // not bound
+      myServer2.openListener();
+      assertEquals(port, myServer2.getPort()); // expect same port
+    } finally {
+      myServer.stop();
+      if (myServer2 != null) {
+        myServer2.stop();
+      }
+    }
+  }
+  
+  private HttpServer checkBindAddress(String host, int port, boolean findPort)
+      throws Exception {
+    HttpServer server = createServer(host, port);
+    try {
+      // not bound, ephemeral should return requested port (0 for ephemeral)
+      InetSocketAddress addr = server.getListenerAddress();
+      assertEquals(port, addr.getPort());
+      // verify hostname is what was given
+      server.openListener();
+      addr = server.getListenerAddress();
+      assertEquals(host, addr.getHostName());
+
+      int boundPort = addr.getPort();
+      if (port == 0) {
+        assertTrue(boundPort != 0); // ephemeral should now return bound port
+      } else if (findPort) {
+        assertTrue(boundPort > port);
+        // allow a little wiggle room to prevent random test failures if
+        // some consecutive ports are already in use
+        assertTrue(addr.getPort() - port < 8);
+      }
+    } catch (Exception e) {
+      server.stop();
+      throw e;
+    }
+    return server;
+  }
 }

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java

@@ -470,7 +470,7 @@ public class TestSequenceFile extends TestCase {
     SequenceFile.Writer writer = SequenceFile.createWriter(
         spyFs, conf, p, NullWritable.class, NullWritable.class);
     writer.close();
-    Mockito.verify(spyFs).getDefaultReplication();
+    Mockito.verify(spyFs).getDefaultReplication(p);
   }
 
   private static class TestFSDataInputStream extends FSDataInputStream {

+ 25 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableUtils.java

@@ -44,6 +44,26 @@ public class TestWritableUtils extends TestCase {
     assertEquals(vintlen, WritableUtils.getVIntSize(val));
     assertEquals(vintlen, WritableUtils.decodeVIntSize(buf.getData()[0]));
   }
+  
+  public static void testReadInRange(long val, int lower,
+      int upper, boolean expectSuccess) throws IOException {
+    DataOutputBuffer buf = new DataOutputBuffer();
+    DataInputBuffer inbuf = new DataInputBuffer();
+    WritableUtils.writeVLong(buf, val);
+    try {
+      inbuf.reset(buf.getData(), 0, buf.getLength());
+      long val2 = WritableUtils.readVIntInRange(inbuf, lower, upper);
+      if (!expectSuccess) {
+        fail("expected readVIntInRange to throw an exception");
+      }
+      assertEquals(val, val2);
+    } catch(IOException e) {
+      if (expectSuccess) {
+        LOG.error("unexpected exception:", e);
+        fail("readVIntInRange threw an unexpected exception");
+      }
+    }
+  }
 
   public static void testVInt() throws Exception {
     testValue(12, 1);
@@ -61,5 +81,10 @@
     testValue(-65536, 3);
     testValue(65536, 4);
     testValue(-65537, 4);
+    testReadInRange(123, 122, 123, true);
+    testReadInRange(123, 0, 100, false);
+    testReadInRange(0, 0, 100, true);
+    testReadInRange(-1, 0, 100, false);
+    testReadInRange(1099511627776L, 0, Integer.MAX_VALUE, false);
   }
 }

+ 33 - 10
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java

@@ -101,6 +101,12 @@ public class TestCodecFactory extends TestCase {
     }
   }
   
+  private static class NewGzipCodec extends BaseCodec {
+    public String getDefaultExtension() {
+      return ".gz";
+    }
+  }
+  
   /**
    * Returns a factory for a given set of codecs
    * @param classes the codec classes to include
@@ -167,32 +173,43 @@
     checkCodec("default factory for deflate codec", DeflateCodec.class, codec);
 
     factory = setClasses(new Class[0]);
+    // gz, bz2, snappy, lz4 are picked up by service loader, but bar isn't
     codec = factory.getCodec(new Path("/tmp/foo.bar"));
-    assertEquals("empty codec bar codec", null, codec);
+    assertEquals("empty factory bar codec", null, codec);
     codec = factory.getCodecByClassName(BarCodec.class.getCanonicalName());
-    assertEquals("empty codec bar codec", null, codec);
+    assertEquals("empty factory bar codec", null, codec);
     
     codec = factory.getCodec(new Path("/tmp/foo.gz"));
-    assertEquals("empty codec gz codec", null, codec);
+    checkCodec("empty factory gz codec", GzipCodec.class, codec);
     codec = factory.getCodecByClassName(GzipCodec.class.getCanonicalName());
-    assertEquals("empty codec gz codec", null, codec);
+    checkCodec("empty factory gz codec", GzipCodec.class, codec);
     
     codec = factory.getCodec(new Path("/tmp/foo.bz2"));
-    assertEquals("empty factory for .bz2", null, codec);
+    checkCodec("empty factory for .bz2", BZip2Codec.class, codec);
     codec = factory.getCodecByClassName(BZip2Codec.class.getCanonicalName());
-    assertEquals("empty factory for bzip2 codec", null, codec);
+    checkCodec("empty factory for bzip2 codec", BZip2Codec.class, codec);
+    
+    codec = factory.getCodec(new Path("/tmp/foo.snappy"));
+    checkCodec("empty factory snappy codec", SnappyCodec.class, codec);
+    codec = factory.getCodecByClassName(SnappyCodec.class.getCanonicalName());
+    checkCodec("empty factory snappy codec", SnappyCodec.class, codec);
+    
+    codec = factory.getCodec(new Path("/tmp/foo.lz4"));
+    checkCodec("empty factory lz4 codec", Lz4Codec.class, codec);
+    codec = factory.getCodecByClassName(Lz4Codec.class.getCanonicalName());
+    checkCodec("empty factory lz4 codec", Lz4Codec.class, codec);
     
     factory = setClasses(new Class[]{BarCodec.class, FooCodec.class, 
                                      FooBarCodec.class});
     codec = factory.getCodec(new Path("/tmp/.foo.bar.gz"));
-    assertEquals("full factory gz codec", null, codec);
+    checkCodec("full factory gz codec", GzipCodec.class, codec);
     codec = factory.getCodecByClassName(GzipCodec.class.getCanonicalName());
-    assertEquals("full codec gz codec", null, codec);
+    checkCodec("full codec gz codec", GzipCodec.class, codec);
     
     codec = factory.getCodec(new Path("/tmp/foo.bz2"));
-    assertEquals("full factory for .bz2", null, codec);
+    checkCodec("full factory for .bz2", BZip2Codec.class, codec);
     codec = factory.getCodecByClassName(BZip2Codec.class.getCanonicalName());
-    assertEquals("full codec bzip2 codec", null, codec);
+    checkCodec("full codec bzip2 codec", BZip2Codec.class, codec);
 
     codec = factory.getCodec(new Path("/tmp/foo.bar"));
     checkCodec("full factory bar codec", BarCodec.class, codec);
@@ -220,5 +237,11 @@
     checkCodec("full factory foo codec", FooCodec.class, codec);
     codec = factory.getCodecByName("FOO");
     checkCodec("full factory foo codec", FooCodec.class, codec);
+    
+    factory = setClasses(new Class[]{NewGzipCodec.class});
+    codec = factory.getCodec(new Path("/tmp/foo.gz"));
+    checkCodec("overridden factory for .gz", NewGzipCodec.class, codec);
+    codec = factory.getCodecByClassName(NewGzipCodec.class.getCanonicalName());
+    checkCodec("overridden factory for gzip codec", NewGzipCodec.class, codec);
   }
 }

+ 1 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java

@@ -25,7 +25,6 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.net.NetUtils;
 
@@ -99,7 +98,7 @@
     }
 
     @Override
-    public Writable call(RpcKind rpcKind, String protocol, Writable param,
+    public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param,
         long receiveTime) throws IOException {
       if (sleep) {
         // sleep a bit

+ 1 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java

@@ -30,7 +30,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 
 /**
@@ -73,7 +72,7 @@
     }
 
     @Override
-    public Writable call(RpcKind rpcKind, String protocol, Writable param,
+    public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param,
         long receiveTime) throws IOException {
       if (sleep) {
         try {

+ 4 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java

@@ -23,7 +23,6 @@ import java.net.InetSocketAddress;
 import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
 import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
 import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
@@ -178,9 +177,9 @@
     // create a server with two handlers
     server = RPC.getServer(Foo0.class,
                               new Foo0Impl(), ADDRESS, 0, 2, false, conf, null);
-    server.addProtocol(RpcKind.RPC_WRITABLE, Foo1.class, new Foo1Impl());
-    server.addProtocol(RpcKind.RPC_WRITABLE, Bar.class, new BarImpl());
-    server.addProtocol(RpcKind.RPC_WRITABLE, Mixin.class, new BarImpl());
+    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Foo1.class, new Foo1Impl());
+    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Bar.class, new BarImpl());
+    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Mixin.class, new BarImpl());
     
     
     // Add Protobuf server
@@ -189,7 +188,7 @@
         new PBServerImpl();
     BlockingService service = TestProtobufRpcProto
         .newReflectiveBlockingService(pbServerImpl);
-    server.addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService.class,
+    server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService.class,
         service);
     server.start();
     addr = NetUtils.getConnectAddress(server);

+ 1 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java

@@ -24,7 +24,6 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto;
@@ -122,7 +121,7 @@
     BlockingService service2 = TestProtobufRpc2Proto
         .newReflectiveBlockingService(server2Impl);
     
-    server.addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService2.class,
+    server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService2.class,
         service2);
     server.start();
   }

+ 9 - 10
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java

@@ -31,7 +31,6 @@ import junit.framework.Assert;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRequestProto;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureResponseProto;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto;
@@ -134,7 +133,7 @@
     TestImpl1 impl = new TestImpl1();
     server = RPC.getServer(TestProtocol1.class,
                             impl, ADDRESS, 0, 2, false, conf, null);
-    server.addProtocol(RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
+    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
     server.start();
     addr = NetUtils.getConnectAddress(server);
 
@@ -201,7 +200,7 @@ System.out.println("echo int is NOT supported");
     TestImpl1 impl = new TestImpl1();
     server = RPC.getServer(TestProtocol1.class,
                               impl, ADDRESS, 0, 2, false, conf, null);
-    server.addProtocol(RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
+    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
     server.start();
     addr = NetUtils.getConnectAddress(server);
 
@@ -222,7 +221,7 @@
     TestImpl2 impl = new TestImpl2();
     server = RPC.getServer(TestProtocol2.class,
                              impl, ADDRESS, 0, 2, false, conf, null);
-    server.addProtocol(RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
+    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
     server.start();
     addr = NetUtils.getConnectAddress(server);
 
@@ -316,11 +315,11 @@
     TestProtocol2 proxy = RPC.getProxy(TestProtocol2.class,
         TestProtocol2.versionID, addr, conf);
     boolean supported = RpcClientUtil.isMethodSupported(proxy,
-        TestProtocol2.class, RpcKind.RPC_WRITABLE,
+        TestProtocol2.class, RPC.RpcKind.RPC_WRITABLE,
         RPC.getProtocolVersion(TestProtocol2.class), "echo");
     Assert.assertTrue(supported);
     supported = RpcClientUtil.isMethodSupported(proxy,
-        TestProtocol2.class, RpcKind.RPC_PROTOCOL_BUFFER,
+        TestProtocol2.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
         RPC.getProtocolVersion(TestProtocol2.class), "echo");
     Assert.assertFalse(supported);
   }
@@ -334,7 +333,7 @@ System.out.println("echo int is NOT supported");
     TestImpl1 impl = new TestImpl1();
     server = RPC.getServer(TestProtocol1.class, impl, ADDRESS, 0, 2, false,
         conf, null);
-    server.addProtocol(RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
+    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
     server.start();
 
     ProtocolMetaInfoServerSideTranslatorPB xlator = 
@@ -343,13 +342,13 @@ System.out.println("echo int is NOT supported");
     GetProtocolSignatureResponseProto resp = xlator.getProtocolSignature(
         null,
         createGetProtocolSigRequestProto(TestProtocol1.class,
-            RpcKind.RPC_PROTOCOL_BUFFER));
+            RPC.RpcKind.RPC_PROTOCOL_BUFFER));
     //No signatures should be found
     Assert.assertEquals(0, resp.getProtocolSignatureCount());
     resp = xlator.getProtocolSignature(
         null,
         createGetProtocolSigRequestProto(TestProtocol1.class,
-            RpcKind.RPC_WRITABLE));
+            RPC.RpcKind.RPC_WRITABLE));
     Assert.assertEquals(1, resp.getProtocolSignatureCount());
     ProtocolSignatureProto sig = resp.getProtocolSignatureList().get(0);
     Assert.assertEquals(TestProtocol1.versionID, sig.getVersion());
@@ -366,7 +365,7 @@ System.out.println("echo int is NOT supported");
   }
   
   private GetProtocolSignatureRequestProto createGetProtocolSigRequestProto(
-      Class<?> protocol, RpcKind rpcKind) {
+      Class<?> protocol, RPC.RpcKind rpcKind) {
     GetProtocolSignatureRequestProto.Builder builder = 
         GetProtocolSignatureRequestProto.newBuilder();
     builder.setProtocol(protocol.getName());

+ 13 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java

@@ -169,6 +169,19 @@ public class TestNetUtils {
     assertInException(wrapped, "/UnknownHost");
   }
   
+  @Test
+  public void testGetConnectAddress() throws IOException {
+    NetUtils.addStaticResolution("host", "127.0.0.1");
+    InetSocketAddress addr = NetUtils.createSocketAddrForHost("host", 1);
+    InetSocketAddress connectAddr = NetUtils.getConnectAddress(addr);
+    assertEquals(addr.getHostName(), connectAddr.getHostName());
+    
+    addr = new InetSocketAddress(1);
+    connectAddr = NetUtils.getConnectAddress(addr);
+    assertEquals(InetAddress.getLocalHost().getHostName(),
+                 connectAddr.getHostName());
+  }
+
   @Test
   public void testCreateSocketAddress() throws Throwable {
     InetSocketAddress addr = NetUtils.createSocketAddr(

+ 62 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java

@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestShutdownHookManager {
+
+  @Test
+  public void shutdownHookManager() {
+    ShutdownHookManager mgr = ShutdownHookManager.get();
+    Assert.assertNotNull(mgr);
+    Assert.assertEquals(0, mgr.getShutdownHooksInOrder().size());
+    Runnable hook1 = new Runnable() {
+      @Override
+      public void run() {
+      }
+    };
+    Runnable hook2 = new Runnable() {
+      @Override
+      public void run() {
+      }
+    };
+
+    mgr.addShutdownHook(hook1, 0);
+    Assert.assertTrue(mgr.hasShutdownHook(hook1));
+    Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size());
+    Assert.assertEquals(hook1, mgr.getShutdownHooksInOrder().get(0));
+    mgr.removeShutdownHook(hook1);
+    Assert.assertFalse(mgr.hasShutdownHook(hook1));
+
+    mgr.addShutdownHook(hook1, 0);
+    Assert.assertTrue(mgr.hasShutdownHook(hook1));
+    Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size());
+    Assert.assertTrue(mgr.hasShutdownHook(hook1));
+    Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size());
+
+    mgr.addShutdownHook(hook2, 1);
+    Assert.assertTrue(mgr.hasShutdownHook(hook1));
+    Assert.assertTrue(mgr.hasShutdownHook(hook2));
+    Assert.assertEquals(2, mgr.getShutdownHooksInOrder().size());
+    Assert.assertEquals(hook2, mgr.getShutdownHooksInOrder().get(0));
+    Assert.assertEquals(hook1, mgr.getShutdownHooksInOrder().get(1));
+
+  }
+}

+ 0 - 18
hadoop-common-project/hadoop-common/src/test/resources/test-patch.properties

@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OK_RELEASEAUDIT_WARNINGS=1
-OK_FINDBUGS_WARNINGS=0
-OK_JAVADOC_WARNINGS=6

+ 0 - 21
hadoop-hdfs-project/dev-support/test-patch.properties

@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The number of acceptable warning for this module
-# Please update the root test-patch.properties if you update this file.
-
-OK_RELEASEAUDIT_WARNINGS=0
-OK_FINDBUGS_WARNINGS=0
-OK_JAVADOC_WARNINGS=8

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java

@@ -159,7 +159,7 @@ public class HttpFSFileSystem extends FileSystem {
   * Get operations.
   */
  public enum GetOpValues {
-    OPEN, GETFILESTATUS, LISTSTATUS, GETHOMEDIR, GETCONTENTSUMMARY, GETFILECHECKSUM,
+    OPEN, GETFILESTATUS, LISTSTATUS, GETHOMEDIRECTORY, GETCONTENTSUMMARY, GETFILECHECKSUM,
    GETDELEGATIONTOKEN, GETFILEBLOCKLOCATIONS, INSTRUMENTATION
  }

@@ -684,7 +684,7 @@ public class HttpFSFileSystem extends FileSystem {
  @Override
  public Path getHomeDirectory() {
    Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, GetOpValues.GETHOMEDIR.toString());
+    params.put(OP_PARAM, GetOpValues.GETHOMEDIRECTORY.toString());
    try {
      HttpURLConnection conn = getConnection(HTTP_GET, params, new Path(getUri().toString(), "/"), false);
      validateResponse(conn, HttpURLConnection.HTTP_OK);

+ 3 - 5
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java

@@ -43,8 +43,8 @@ import java.util.Map;
 public class FSOperations {

   /**
-   * Converts a Unix permission octal & symbolic representation
-   * (i.e. 655 or -rwxr--r--) into a FileSystemAccess permission.
+   * Converts a Unix permission octal
+   * (i.e. 655 or 1777) into a FileSystemAccess permission.
   *
   * @param str Unix permission symbolic representation.
   *
@@ -55,10 +55,8 @@ public class FSOperations {
    FsPermission permission;
    if (str.equals(HttpFSFileSystem.DEFAULT_PERMISSION)) {
      permission = FsPermission.getDefault();
-    } else if (str.length() == 3) {
-      permission = new FsPermission(Short.parseShort(str, 8));
     } else {
-      permission = FsPermission.valueOf(str);
+      permission = new FsPermission(Short.parseShort(str, 8));
    }
    return permission;
  }
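A small sketch of the octal conversion this hunk settles on, showing that a four-digit mode such as 1777 carries the sticky bit; the sample values mirror the javadoc above, and it assumes FsPermission's short constructor decodes the sticky-bit digit, which is what the new HttpFS sticky-bit test relies on.

    import org.apache.hadoop.fs.permission.FsPermission;

    public class OctalPermissionSketch {
      public static void main(String[] args) {
        // "655" and "1777" match the javadoc examples; base-8 parsing keeps
        // the leading sticky-bit digit in the short value.
        FsPermission plain = new FsPermission(Short.parseShort("655", 8));
        FsPermission sticky = new FsPermission(Short.parseShort("1777", 8));
        System.out.println(plain);                  // rw-r-xr-x
        System.out.println(sticky.getStickyBit());  // true (assumed behaviour)
      }
    }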

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java

@@ -446,7 +446,7 @@ public class HttpFSParams {
     * Symbolic Unix permissions regular expression pattern.
     */
    private static final Pattern PERMISSION_PATTERN =
-      Pattern.compile(DEFAULT + "|(-[-r][-w][-x][-r][-w][-x][-r][-w][-x])" + "|[0-7][0-7][0-7]");
+      Pattern.compile(DEFAULT + "|[0-1]?[0-7][0-7][0-7]");
 
 
    /**
     * Constructor.

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java

@@ -291,7 +291,7 @@ public class HttpFSServer {
          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
          break;
        }
-        case GETHOMEDIR: {
+        case GETHOMEDIRECTORY: {
          FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
          JSONObject json = fsExecute(user, doAs.value(), command);
          AUDIT_LOG.info("");

+ 15 - 5
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java

@@ -310,11 +310,8 @@ public class TestHttpFSFileSystem extends HFSTestCase {
 
 
  private void testSetPermission() throws Exception {
    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
-    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
-    OutputStream os = fs.create(path);
-    os.write(1);
-    os.close();
-    fs.close();
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foodir");
+    fs.mkdirs(path);
 
 
    fs = getHttpFileSystem();
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
@@ -326,6 +323,19 @@ public class TestHttpFSFileSystem extends HFSTestCase {
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);
+
+    //sticky bit 
+    fs = getHttpFileSystem();
+    permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE, true);
+    fs.setPermission(path, permission1);
+    fs.close();
+
+    fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    status1 = fs.getFileStatus(path);
+    fs.close();
+    permission2 = status1.getPermission();
+    Assert.assertTrue(permission2.getStickyBit());
+    Assert.assertEquals(permission2, permission1);
  }

  private void testSetOwner() throws Exception {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java

@@ -53,7 +53,7 @@ public class TestCheckUploadContentTypeFilter {
 
 
  @Test
  public void getOther() throws Exception {
-    test("GET", HttpFSFileSystem.GetOpValues.GETHOMEDIR.toString(), "plain/text", false, false);
+    test("GET", HttpFSFileSystem.GetOpValues.GETHOMEDIRECTORY.toString(), "plain/text", false, false);
  }

  @Test

+ 73 - 2
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -65,8 +65,17 @@ Trunk (unreleased changes)
    HDFS-3273. Refactor BackupImage and FSEditLog, and rename
    JournalListener.rollLogs(..) to startLogSegment(..).  (szetszwo)

-    HDFS-3292. Remove the deprecated DiskStatus, getDiskStatus(), getRawCapacity() and
-    getRawUsed() from DistributedFileSystem.  (Arpit Gupta via szetszwo)
+    HDFS-3292. Remove the deprecated DiskStatus, getDiskStatus(), getRawUsed()
+    and getRawCapacity() from DistributedFileSystem.  (Arpit Gupta via szetszwo)
+
+    HADOOP-8285. HDFS changes for Use ProtoBuf for RpcPayLoadHeader. (sanjay
+    radia)
+
+    HDFS-2743. Streamline usage of bookkeeper journal manager. 
+    (Ivan Kelly via umamahesh)
+
+    HDFS-3293. Add toString(), equals(..) and hashCode() to JournalInfo.
+    (Hari Mankude via szetszwo)
 
 
  OPTIMIZATIONS

@@ -130,6 +139,8 @@ Trunk (unreleased changes)
    (Henry Robinson via atm)

    HDFS-3243. TestParallelRead timing out on jenkins. (Henry Robinson via todd)
+
+    HDFS-3265. PowerPc Build error. (Kumar Ravi via mattf)
     
     
Release 2.0.0 - UNRELEASED

@@ -210,6 +221,10 @@ Release 2.0.0 - UNRELEASED
 
 
    HDFS-3004. Implement Recovery Mode. (Colin Patrick McCabe via eli)

+    HDFS-3282. Add HdfsDataInputStream as a public API. (umamahesh)
+
+    HDFS-3298. Add HdfsDataOutputStream as a public API.  (szetszwo)
+
  IMPROVEMENTS

    HDFS-2018. Move all journal stream management code into one place.
@@ -390,6 +405,20 @@ Release 2.0.0 - UNRELEASED
 
 
    HDFS-3263. HttpFS should read HDFS config from Hadoop site.xml files (tucu)

+    HDFS-3206. Miscellaneous xml cleanups for OEV.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3169. TestFsck should test multiple -move operations in a row.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3258. Test for HADOOP-8144 (pseudoSortByDistance in
+    NetworkTopology for first rack local node). (Junping Du via eli)
+
+    HDFS-3322. Use HdfsDataInputStream and HdfsDataOutputStream in Hdfs.
+    (szetszwo)
+
+    HDFS-3339. Change INode to package private.  (John George via szetszwo)
+
  OPTIMIZATIONS

    HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -535,6 +564,31 @@ Release 2.0.0 - UNRELEASED
    HDFS-3165. HDFS Balancer scripts are refering to wrong path of
    hadoop-daemon.sh (Amith D K via eli)

+    HDFS-891. DataNode no longer needs to check for dfs.network.script.
+    (harsh via eli)
+
+    HDFS-3305. GetImageServlet should consider SBN a valid requestor in a
+    secure HA setup. (atm)
+
+    HDFS-3314. HttpFS operation for getHomeDirectory is incorrect. (tucu)
+
+    HDFS-3319. Change DFSOutputStream to not to start a thread in constructors.
+    (szetszwo)
+
+    HDFS-3181. Fix a test case in TestLeaseRecovery2.  (szetszwo)
+
+    HDFS-3309. HttpFS (Hoop) chmod not supporting octal and sticky bit 
+    permissions. (tucu)
+
+    HDFS-3326. Append enabled log message uses the wrong variable.
+    (Matthew Jacobs via eli)
+
+    HDFS-3336. hdfs launcher script will be better off not special casing 
+    namenode command with regards to hadoop.security.logger (rvs via tucu)
+
+    HDFS-3330. If GetImageServlet throws an Error or RTE, response should not
+    have HTTP "OK" status. (todd)
+
  BREAKDOWN OF HDFS-1623 SUBTASKS

    HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -871,6 +925,23 @@ Release 0.23.3 - UNRELEASED
    HDFS-2652. Add support for host-based delegation tokens.  (Daryn Sharp via
    szetszwo)

+    HDFS-3308. Uses canonical URI to select delegation tokens in HftpFileSystem
+    and WebHdfsFileSystem.  (Daryn Sharp via szetszwo)
+
+    HDFS-3312. In HftpFileSystem, the namenode URI is non-secure but the
+    delegation tokens have to use secure URI.  (Daryn Sharp via szetszwo)
+
+    HDFS-3318. Use BoundedInputStream in ByteRangeInputStream, otherwise, it
+    hangs on transfers >2 GB.  (Daryn Sharp via szetszwo)
+
+    HDFS-3321. Fix safe mode turn off tip message.  (Ravi Prakash via szetszwo)
+
+    HDFS-3334. Fix ByteRangeInputStream stream leakage.  (Daryn Sharp via
+    szetszwo)
+
+    HDFS-3331. In namenode, check superuser privilege for setBalancerBandwidth
+    and acquire the write lock for finalizeUpgrade.  (szetszwo)
+
Release 0.23.2 - UNRELEASED

  INCOMPATIBLE CHANGES

+ 13 - 7
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/README.txt

@@ -12,19 +12,25 @@ How do I build?
 To generate the distribution packages for BK journal, do the
 following.

-   $ mvn clean install -Pdist -Dtar
+   $ mvn clean package -Pdist
 
 
- This will generate a tarball, 
- target/hadoop-hdfs-bkjournal-<VERSION>.tar.gz 
+ This will generate a jar with all the dependencies needed by the journal
+ manager, 
+
+ target/hadoop-hdfs-bkjournal-<VERSION>.jar
+
+ Note that the -Pdist part of the build command is important, as otherwise
+ the dependencies would not be packaged in the jar. 
 
 
-------------------------------------------------------------------------------
How do I use the BookKeeper Journal?

- To run a HDFS namenode using BookKeeper as a backend, extract the
- distribution package on top of hdfs
+ To run a HDFS namenode using BookKeeper as a backend, copy the bkjournal
+ jar, generated above, into the lib directory of hdfs. In the standard 
+ distribution of HDFS, this is at $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
 
 
-   cd hadoop-hdfs-<VERSION>/
-   tar --strip-components 1 -zxvf path/to/hadoop-hdfs-bkjournal-<VERSION>.tar.gz
+  cp target/hadoop-hdfs-bkjournal-<VERSION>.jar \
+    $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
 
 
 Then, in hdfs-site.xml, set the following properties.


+ 46 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml

@@ -65,4 +65,50 @@
      <scope>test</scope>
    </dependency>
  </dependencies>
+  <profiles>
+    <profile>
+      <id>dist</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-shade-plugin</artifactId>
+            <version>1.5</version>
+            <executions>
+              <execution>
+                <phase>package</phase>
+                <goals>
+                  <goal>shade</goal>
+                </goals>
+                <configuration>
+                  <createDependencyReducedPom>false</createDependencyReducedPom>
+                  <artifactSet>
+                    <includes>
+                      <include>org.apache.bookkeeper:bookkeeper-server</include>
+                      <include>org.apache.zookeeper:zookeeper</include>
+                      <include>org.jboss.netty:netty</include>
+                    </includes>
+                  </artifactSet>
+                <relocations>
+                  <relocation>
+                    <pattern>org.apache.bookkeeper</pattern>
+                    <shadedPattern>hidden.bkjournal.org.apache.bookkeeper</shadedPattern>
+                  </relocation>
+                  <relocation>
+                    <pattern>org.apache.zookeeper</pattern>
+                    <shadedPattern>hidden.bkjournal.org.apache.zookeeper</shadedPattern>
+                  </relocation>
+                  <relocation>
+                    <pattern>org.jboss.netty</pattern>
+                    <shadedPattern>hidden.bkjournal.org.jboss.netty</shadedPattern>
+                  </relocation>
+                </relocations>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
 </project>

+ 1 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs

@@ -122,12 +122,7 @@ if $cygwin; then
fi
export CLASSPATH=$CLASSPATH

-#turn security logger on the namenode
-if [ $COMMAND = "namenode" ]; then
-  HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS}"
-else
-  HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
-fi
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
 
 
# Check to see if we should start a secure datanode
if [ "$starting_secure_dn" = "true" ]; then

+ 7 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java

@@ -35,6 +35,8 @@ import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -43,8 +45,8 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.util.Progressable;

@@ -88,11 +90,11 @@ public class Hdfs extends AbstractFileSystem {
  }

  @Override
-  public FSDataOutputStream createInternal(Path f,
+  public HdfsDataOutputStream createInternal(Path f,
      EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
      int bufferSize, short replication, long blockSize, Progressable progress,
      int bytesPerChecksum, boolean createParent) throws IOException {
-    return new FSDataOutputStream(dfs.primitiveCreate(getUriPath(f),
+    return new HdfsDataOutputStream(dfs.primitiveCreate(getUriPath(f),
        absolutePermission, createFlag, createParent, replication, blockSize,
        progress, bufferSize, bytesPerChecksum), getStatistics());
  }
@@ -324,8 +326,9 @@ public class Hdfs extends AbstractFileSystem {
    dfs.mkdirs(getUriPath(dir), permission, createParent);
  }

+  @SuppressWarnings("deprecation")
   @Override
-  public FSDataInputStream open(Path f, int bufferSize) 
+  public HdfsDataInputStream open(Path f, int bufferSize) 
      throws IOException, UnresolvedLinkException {
    return new DFSClient.DFSDataInputStream(dfs.open(getUriPath(f),
        bufferSize, verifyChecksum));

+ 71 - 31
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java

@@ -23,9 +23,12 @@ import java.io.InputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;

+import org.apache.commons.io.input.BoundedInputStream;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.hdfs.server.namenode.StreamFile;

+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * To support HTTP byte streams, a new connection to an HTTP server needs to be
  * created each time. This class hides the complexity of those multiple 
@@ -60,7 +63,7 @@ public abstract class ByteRangeInputStream extends FSInputStream {
  }

  enum StreamStatus {
-    NORMAL, SEEK
+    NORMAL, SEEK, CLOSED
  }
  protected InputStream in;
  protected URLOpener originalURL;
@@ -88,66 +91,93 @@ public abstract class ByteRangeInputStream extends FSInputStream {
  protected abstract URL getResolvedUrl(final HttpURLConnection connection
      ) throws IOException;

-  private InputStream getInputStream() throws IOException {
-    if (status != StreamStatus.NORMAL) {
-      
-      if (in != null) {
-        in.close();
-        in = null;
-      }
-      
-      // Use the original url if no resolved url exists, eg. if
-      // it's the first time a request is made.
-      final URLOpener opener =
-        (resolvedURL.getURL() == null) ? originalURL : resolvedURL;
-
-      final HttpURLConnection connection = opener.openConnection(startPos);
-      connection.connect();
-      checkResponseCode(connection);
-
-      final String cl = connection.getHeaderField(StreamFile.CONTENT_LENGTH);
-      filelength = (cl == null) ? -1 : Long.parseLong(cl);
-      in = connection.getInputStream();
-
-      resolvedURL.setURL(getResolvedUrl(connection));
-      status = StreamStatus.NORMAL;
+  @VisibleForTesting
+  protected InputStream getInputStream() throws IOException {
+    switch (status) {
+      case NORMAL:
+        break;
+      case SEEK:
+        if (in != null) {
+          in.close();
+        }
+        in = openInputStream();
+        status = StreamStatus.NORMAL;
+        break;
+      case CLOSED:
+        throw new IOException("Stream closed");
     }
-    
    return in;
  }
  
-  private void update(final boolean isEOF, final int n)
-      throws IOException {
-    if (!isEOF) {
+  @VisibleForTesting
+  protected InputStream openInputStream() throws IOException {
+    // Use the original url if no resolved url exists, eg. if
+    // it's the first time a request is made.
+    final URLOpener opener =
+      (resolvedURL.getURL() == null) ? originalURL : resolvedURL;
+
+    final HttpURLConnection connection = opener.openConnection(startPos);
+    connection.connect();
+    checkResponseCode(connection);
+
+    final String cl = connection.getHeaderField(StreamFile.CONTENT_LENGTH);
+    if (cl == null) {
+      throw new IOException(StreamFile.CONTENT_LENGTH+" header is missing");
+    }
+    final long streamlength = Long.parseLong(cl);
+    filelength = startPos + streamlength;
+    // Java has a bug with >2GB request streams.  It won't bounds check
+    // the reads so the transfer blocks until the server times out
+    InputStream is =
+        new BoundedInputStream(connection.getInputStream(), streamlength);
+
+    resolvedURL.setURL(getResolvedUrl(connection));
+    
+    return is;
+  }
+  
+  private int update(final int n) throws IOException {
+    if (n != -1) {
      currentPos += n;
    } else if (currentPos < filelength) {
      throw new IOException("Got EOF but currentPos = " + currentPos
          + " < filelength = " + filelength);
    }
+    return n;
  }

+  @Override
  public int read() throws IOException {
    final int b = getInputStream().read();
-    update(b == -1, 1);
+    update((b == -1) ? -1 : 1);
    return b;
  }
+
+  @Override
+  public int read(byte b[], int off, int len) throws IOException {
+    return update(getInputStream().read(b, off, len));
+  }
   
   
  /**
   * Seek to the given offset from the start of the file.
   * The next read() will be from that location.  Can't
   * seek past the end of the file.
   */
+  @Override
  public void seek(long pos) throws IOException {
    if (pos != currentPos) {
      startPos = pos;
      currentPos = pos;
-      status = StreamStatus.SEEK;
+      if (status != StreamStatus.CLOSED) {
+        status = StreamStatus.SEEK;
+      }
    }
  }

  /**
   * Return the current offset from the start of the file
   */
+  @Override
  public long getPos() throws IOException {
    return currentPos;
  }
@@ -156,7 +186,17 @@ public abstract class ByteRangeInputStream extends FSInputStream {
   * Seeks a different copy of the data.  Returns true if
   * found a new source, false otherwise.
   */
+  @Override
  public boolean seekToNewSource(long targetPos) throws IOException {
    return false;
  }
-}
+  
+  @Override
+  public void close() throws IOException {
+    if (in != null) {
+      in.close();
+      in = null;
+    }
+    status = StreamStatus.CLOSED;
+  }
+}

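A standalone sketch (not HDFS code) of the commons-io BoundedInputStream behaviour the patch relies on: reads stop at the declared length instead of blocking on the underlying stream, which is what works around the >2 GB transfer hang noted in the comment above. The byte counts here are illustrative.

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;
    import org.apache.commons.io.input.BoundedInputStream;

    public class BoundedReadSketch {
      public static void main(String[] args) throws Exception {
        InputStream raw = new ByteArrayInputStream(new byte[16]);
        // Cap the stream at 8 bytes, the way openInputStream() caps the HTTP
        // body at the advertised Content-Length value.
        InputStream bounded = new BoundedInputStream(raw, 8);
        int count = 0;
        while (bounded.read() != -1) {
          count++;
        }
        System.out.println(count);   // prints 8
      }
    }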
+ 17 - 45
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -78,8 +78,6 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
@@ -91,6 +89,8 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -996,7 +996,7 @@ public class DFSClient implements java.io.Closeable {
   * Call {@link #create(String, FsPermission, EnumSet, boolean, short, 
   * long, Progressable, int)} with <code>createParent</code> set to true.
   */
-  public OutputStream create(String src, 
+  public DFSOutputStream create(String src, 
                             FsPermission permission,
                             EnumSet<CreateFlag> flag, 
                             short replication,
@@ -1029,7 +1029,7 @@ public class DFSClient implements java.io.Closeable {
   * @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable,
   * boolean, short, long) for detailed description of exceptions thrown
   */
-  public OutputStream create(String src, 
+  public DFSOutputStream create(String src, 
                             FsPermission permission,
                             EnumSet<CreateFlag> flag, 
                             boolean createParent,
@@ -1046,9 +1046,9 @@ public class DFSClient implements java.io.Closeable {
    if(LOG.isDebugEnabled()) {
      LOG.debug(src + ": masked=" + masked);
    }
-    final DFSOutputStream result = new DFSOutputStream(this, src, masked, flag,
-        createParent, replication, blockSize, progress, buffersize,
-        dfsClientConf.createChecksum());
+    final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
+        src, masked, flag, createParent, replication, blockSize, progress,
+        buffersize, dfsClientConf.createChecksum());
    leaserenewer.put(src, result, this);
    return result;
  }
@@ -1078,7 +1078,7 @@ public class DFSClient implements java.io.Closeable {
   *  Progressable, int)} except that the permission
   *  is absolute (ie has already been masked with umask.
   */
-  public OutputStream primitiveCreate(String src, 
+  public DFSOutputStream primitiveCreate(String src, 
                             FsPermission absPermission,
                             EnumSet<CreateFlag> flag,
                             boolean createParent,
@@ -1095,7 +1095,7 @@ public class DFSClient implements java.io.Closeable {
      DataChecksum checksum = DataChecksum.newDataChecksum(
          dfsClientConf.checksumType,
          bytesPerChecksum);
-      result = new DFSOutputStream(this, src, absPermission,
+      result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
          flag, createParent, replication, blockSize, progress, buffersize,
          checksum);
    }
@@ -1154,7 +1154,7 @@ public class DFSClient implements java.io.Closeable {
                                     UnsupportedOperationException.class,
                                     UnresolvedPathException.class);
    }
-    return new DFSOutputStream(this, src, buffersize, progress,
+    return DFSOutputStream.newStreamForAppend(this, src, buffersize, progress,
        lastBlock, stat, dfsClientConf.createChecksum());
  }
  
@@ -1169,11 +1169,11 @@ public class DFSClient implements java.io.Closeable {
   * 
   * @see ClientProtocol#append(String, String) 
   */
-  public FSDataOutputStream append(final String src, final int buffersize,
+  public HdfsDataOutputStream append(final String src, final int buffersize,
      final Progressable progress, final FileSystem.Statistics statistics
      ) throws IOException {
    final DFSOutputStream out = append(src, buffersize, progress);
-    return new FSDataOutputStream(out, statistics, out.getInitialLen());
+    return new HdfsDataOutputStream(out, statistics, out.getInitialLen());
  }

  private DFSOutputStream append(String src, int buffersize, Progressable progress) 
@@ -1809,41 +1809,13 @@ public class DFSClient implements java.io.Closeable {
  }

  /**
-   * The Hdfs implementation of {@link FSDataInputStream}
+   * @deprecated use {@link HdfsDataInputStream} instead.
    */
-  @InterfaceAudience.Private
-  public static class DFSDataInputStream extends FSDataInputStream {
-    public DFSDataInputStream(DFSInputStream in)
-      throws IOException {
-      super(in);
-    }
-      
-    /**
-     * Returns the datanode from which the stream is currently reading.
-     */
-    public DatanodeInfo getCurrentDatanode() {
-      return ((DFSInputStream)in).getCurrentDatanode();
-    }
-      
-    /**
-     * Returns the block containing the target position. 
-     */
-    public ExtendedBlock getCurrentBlock() {
-      return ((DFSInputStream)in).getCurrentBlock();
-    }
+  @Deprecated
+  public static class DFSDataInputStream extends HdfsDataInputStream {
 
 
-    /**
-     * Return collection of blocks that has already been located.
-     */
-    synchronized List<LocatedBlock> getAllBlocks() throws IOException {
-      return ((DFSInputStream)in).getAllBlocks();
-    }
-    
-    /**
-     * @return The visible length of the file.
-     */
-    public long getVisibleLength() throws IOException {
-      return ((DFSInputStream)in).getFileLength();
+    public DFSDataInputStream(DFSInputStream in) throws IOException {
+      super(in);
    }
  }
   
   

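A hedged client-side sketch of what the move from DFSClient.DFSDataInputStream to the public HdfsDataInputStream looks like to callers; it assumes HdfsDataInputStream keeps the accessors the old wrapper exposed (getCurrentBlock(), getCurrentDatanode(), getVisibleLength()), and the path is a placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

    public class HdfsStreamInfoSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FSDataInputStream in = fs.open(new Path("/tmp/example.txt"));
        // When the underlying filesystem is HDFS, the stream carries
        // HDFS-specific metadata such as the visible file length.
        if (in instanceof HdfsDataInputStream) {
          HdfsDataInputStream hin = (HdfsDataInputStream) in;
          System.out.println("visible length: " + hin.getVisibleLength());
        }
        in.close();
      }
    }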
+ 40 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -118,6 +118,39 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
   * Grab the open-file info from namenode
   */
  synchronized void openInfo() throws IOException, UnresolvedLinkException {
+    lastBlockBeingWrittenLength = fetchLocatedBlocksAndGetLastBlockLength();
+    int retriesForLastBlockLength = 3;
+    while (retriesForLastBlockLength > 0) {
+      // Getting last block length as -1 is a special case. When cluster
+      // restarts, DNs may not report immediately. At this time partial block
+      // locations will not be available with NN for getting the length. Lets
+      // retry for 3 times to get the length.
+      if (lastBlockBeingWrittenLength == -1) {
+        DFSClient.LOG.warn("Last block locations not available. "
+            + "Datanodes might not have reported blocks completely."
+            + " Will retry for " + retriesForLastBlockLength + " times");
+        waitFor(4000);
+        lastBlockBeingWrittenLength = fetchLocatedBlocksAndGetLastBlockLength();
+      } else {
+        break;
+      }
+      retriesForLastBlockLength--;
+    }
+    if (retriesForLastBlockLength == 0) {
+      throw new IOException("Could not obtain the last block locations.");
+    }
+  }
+
+  private void waitFor(int waitTime) throws IOException {
+    try {
+      Thread.sleep(waitTime);
+    } catch (InterruptedException e) {
+      throw new IOException(
+          "Interrupted while getting the last block length.");
+    }
+  }
+
+  private long fetchLocatedBlocksAndGetLastBlockLength() throws IOException {
    LocatedBlocks newInfo = DFSClient.callGetBlockLocations(dfsClient.namenode, src, 0, prefetchSize);
    if (DFSClient.LOG.isDebugEnabled()) {
      DFSClient.LOG.debug("newInfo = " + newInfo);
@@ -136,10 +169,13 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
      }
    }
    locatedBlocks = newInfo;
-    lastBlockBeingWrittenLength = 0;
+    long lastBlockBeingWrittenLength = 0;
    if (!locatedBlocks.isLastBlockComplete()) {
      final LocatedBlock last = locatedBlocks.getLastLocatedBlock();
      if (last != null) {
+        if (last.getLocations().length == 0) {
+          return -1;
+        }
        final long len = readBlockLength(last);
        last.getBlock().setNumBytes(len);
        lastBlockBeingWrittenLength = len; 
@@ -147,13 +183,12 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
    }

    currentNode = null;
+    return lastBlockBeingWrittenLength;
  }

  /** Read the block length from one of the datanodes. */
  private long readBlockLength(LocatedBlock locatedblock) throws IOException {
-    if (locatedblock == null || locatedblock.getLocations().length == 0) {
-      return 0;
-    }
+    assert locatedblock != null : "LocatedBlock cannot be null";
    int replicaNotFoundCount = locatedblock.getLocations().length;
    
    for(DatanodeInfo datanode : locatedblock.getLocations()) {
@@ -224,7 +259,7 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
  /**
   * Return collection of blocks that has already been located.
   */
-  synchronized List<LocatedBlock> getAllBlocks() throws IOException {
+  public synchronized List<LocatedBlock> getAllBlocks() throws IOException {
    return getBlockRange(0, getFileLength());
  }


+ 41 - 23
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -44,7 +44,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -99,7 +99,7 @@ import org.apache.hadoop.util.Progressable;
  * starts sending packets from the dataQueue.
 ****************************************************************/
 @InterfaceAudience.Private
-class DFSOutputStream extends FSOutputSummer implements Syncable {
+public class DFSOutputStream extends FSOutputSummer implements Syncable {
   private final DFSClient dfsClient;
   private static final int MAX_PACKETS = 80; // each packet 64K, total 5MB
   private Socket s;
@@ -1233,14 +1233,11 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
    this.checksum = checksum;
  }

-  /**
-   * Create a new output stream to the given DataNode.
-   * @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable, boolean, short, long)
-   */
-  DFSOutputStream(DFSClient dfsClient, String src, FsPermission masked, EnumSet<CreateFlag> flag,
-      boolean createParent, short replication, long blockSize, Progressable progress,
-      int buffersize, DataChecksum checksum) 
-      throws IOException {
+  /** Construct a new output stream for creating a file. */
+  private DFSOutputStream(DFSClient dfsClient, String src, FsPermission masked,
+      EnumSet<CreateFlag> flag, boolean createParent, short replication,
+      long blockSize, Progressable progress, int buffersize,
+      DataChecksum checksum) throws IOException {
    this(dfsClient, src, blockSize, progress, checksum, replication);

    computePacketChunkSize(dfsClient.getConf().writePacketSize,
@@ -1260,14 +1257,21 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
                                     UnresolvedPathException.class);
    }
    streamer = new DataStreamer();
-    streamer.start();
  }

-  /**
-   * Create a new output stream to the given DataNode.
-   * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long)
-   */
-  DFSOutputStream(DFSClient dfsClient, String src, int buffersize, Progressable progress,
+  static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
+      FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
+      short replication, long blockSize, Progressable progress, int buffersize,
+      DataChecksum checksum) throws IOException {
+    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, masked,
+        flag, createParent, replication, blockSize, progress, buffersize,
+        checksum);
+    out.streamer.start();
+    return out;
+  }
+
+  /** Construct a new output stream for append. */
+  private DFSOutputStream(DFSClient dfsClient, String src, int buffersize, Progressable progress,
      LocatedBlock lastBlock, HdfsFileStatus stat,
      DataChecksum checksum) throws IOException {
    this(dfsClient, src, stat.getBlockSize(), progress, checksum, stat.getReplication());
@@ -1285,7 +1289,15 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
          checksum.getBytesPerChecksum());
      streamer = new DataStreamer();
    }
-    streamer.start();
+  }
+
+  static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
+      int buffersize, Progressable progress, LocatedBlock lastBlock,
+      HdfsFileStatus stat, DataChecksum checksum) throws IOException {
+    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, buffersize,
+        progress, lastBlock, stat, checksum);
+    out.streamer.start();
+    return out;
   }
  }

  private void computePacketChunkSize(int psize, int csize) {
   }
  }

  /**
-   * from the designated replication factor of the file because the NameNode
-   * does not replicate the block to which a client is currently writing to.
-   * The client continues to write to a block even if a few datanodes in the
-   * write pipeline have failed. 
-   * @return the number of valid replicas of the current block
+   * @deprecated use {@link HdfsDataOutputStream#getCurrentBlockReplication()}.
    */
    */
+  @Deprecated
   public synchronized int getNumCurrentReplicas() throws IOException {
   public synchronized int getNumCurrentReplicas() throws IOException {
+    return getCurrentBlockReplication();
+  }
+
+  /**
+   * Note that this is not a public API;
+   * use {@link HdfsDataOutputStream#getCurrentBlockReplication()} instead.
+   * 
+   * @return the number of valid replicas of the current block
+   */
+  public synchronized int getCurrentBlockReplication() throws IOException {
     dfsClient.checkOpen();
    dfsClient.checkOpen();
    isClosed();
    if (streamer == null) {
+ 1 - 33
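The reshuffle above follows a common pattern: keep the constructors passive and start the worker thread from a static factory once the object is fully built. A generic illustration (not HDFS code) of that pattern:

    public class Worker {
      private final Thread streamer;

      private Worker() {
        // The constructor only wires state; it must not start the thread,
        // otherwise the thread could observe a partially constructed object.
        streamer = new Thread(new Runnable() {
          @Override
          public void run() {
            // drain queues, send packets, etc.
          }
        });
      }

      public static Worker newStartedWorker() {
        Worker w = new Worker();
        w.streamer.start();   // started only after construction completes
        return w;
      }
    }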
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -139,37 +138,6 @@ public class DFSUtil {
    return true;
  }

-  /**
-   * Utility class to facilitate junit test error simulation.
-   */
-  @InterfaceAudience.Private
-  public static class ErrorSimulator {
-    private static boolean[] simulation = null; // error simulation events
-    public static void initializeErrorSimulationEvent(int numberOfEvents) {
-      simulation = new boolean[numberOfEvents]; 
-      for (int i = 0; i < numberOfEvents; i++) {
-        simulation[i] = false;
-      }
-    }
-    
-    public static boolean getErrorSimulation(int index) {
-      if(simulation == null)
-        return false;
-      assert(index < simulation.length);
-      return simulation[index];
-    }
-    
-    public static void setErrorSimulation(int index) {
-      assert(index < simulation.length);
-      simulation[index] = true;
-    }
-    
-    public static void clearErrorSimulation(int index) {
-      assert(index < simulation.length);
-      simulation[index] = false;
-    }
-  }
-
  /**
   * Converts a byte array to a string using UTF8 encoding.
   */
@@ -1010,7 +978,7 @@ public class DFSUtil {
  public static void addPBProtocol(Configuration conf, Class<?> protocol,
      BlockingService service, RPC.Server server) throws IOException {
    RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class);
-    server.addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, protocol, service);
+    server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service);
  }

  /**

+ 32 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
@@ -45,7 +44,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -88,6 +88,17 @@ public class DistributedFileSystem extends FileSystem {
  public DistributedFileSystem() {
  }

+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>hdfs</code>
+   */
+  @Override
+  public String getScheme() {
+    return "hdfs";
+  }
+
  @Deprecated
  public DistributedFileSystem(InetSocketAddress namenode,
    Configuration conf) throws IOException {
@@ -194,8 +205,9 @@ public class DistributedFileSystem extends FileSystem {
    return dfs.recoverLease(getPathName(f));
  }

+  @SuppressWarnings("deprecation")
   @Override
-  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+  public HdfsDataInputStream open(Path f, int bufferSize) throws IOException {
    statistics.incrementReadOps(1);
    return new DFSClient.DFSDataInputStream(
          dfs.open(getPathName(f), bufferSize, verifyChecksum));
@@ -203,31 +215,33 @@ public class DistributedFileSystem extends FileSystem {
 
 
  /** This optional operation is not yet supported. */
  @Override
-  public FSDataOutputStream append(Path f, int bufferSize,
+  public HdfsDataOutputStream append(Path f, int bufferSize,
      Progressable progress) throws IOException {
    statistics.incrementWriteOps(1);
    return dfs.append(getPathName(f), bufferSize, progress, statistics);
  }

  @Override
-  public FSDataOutputStream create(Path f, FsPermission permission,
+  public HdfsDataOutputStream create(Path f, FsPermission permission,
    boolean overwrite, int bufferSize, short replication, long blockSize,
    Progressable progress) throws IOException {
    statistics.incrementWriteOps(1);
-    return new FSDataOutputStream(dfs.create(getPathName(f), permission,
-        overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
-            : EnumSet.of(CreateFlag.CREATE), replication, blockSize, progress,
-        bufferSize), statistics);
+    final EnumSet<CreateFlag> cflags = overwrite?
+        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
+        : EnumSet.of(CreateFlag.CREATE);
+    final DFSOutputStream out = dfs.create(getPathName(f), permission, cflags,
+        replication, blockSize, progress, bufferSize);
+    return new HdfsDataOutputStream(out, statistics);
  }
  
  @SuppressWarnings("deprecation")
  @Override
-  protected FSDataOutputStream primitiveCreate(Path f,
+  protected HdfsDataOutputStream primitiveCreate(Path f,
    FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
    short replication, long blockSize, Progressable progress,
    int bytesPerChecksum) throws IOException {
    statistics.incrementReadOps(1);
-    return new FSDataOutputStream(dfs.primitiveCreate(getPathName(f),
+    return new HdfsDataOutputStream(dfs.primitiveCreate(getPathName(f),
        absolutePermission, flag, true, replication, blockSize,
        progress, bufferSize, bytesPerChecksum),statistics);
   } 
@@ -235,14 +249,14 @@ public class DistributedFileSystem extends FileSystem {
  /**
   * Same as create(), except fails if parent directory doesn't already exist.
   */
-  public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
+  public HdfsDataOutputStream createNonRecursive(Path f, FsPermission permission,
      EnumSet<CreateFlag> flag, int bufferSize, short replication,
      long blockSize, Progressable progress) throws IOException {
    statistics.incrementWriteOps(1);
    if (flag.contains(CreateFlag.OVERWRITE)) {
      flag.add(CreateFlag.CREATE);
    }
-    return new FSDataOutputStream(dfs.create(getPathName(f), permission, flag,
+    return new HdfsDataOutputStream(dfs.create(getPathName(f), permission, flag,
         false, replication, blockSize, progress, bufferSize), statistics);
         false, replication, blockSize, progress, bufferSize), statistics);
   }
   }
 
 
@@ -627,14 +641,14 @@ public class DistributedFileSystem extends FileSystem {
     FSDataInputStream in, long inPos, 
     FSDataInputStream sums, long sumsPos) {
     
-    if(!(in instanceof DFSDataInputStream && sums instanceof DFSDataInputStream))
-      throw new IllegalArgumentException("Input streams must be types " +
-                                         "of DFSDataInputStream");
+    if(!(in instanceof HdfsDataInputStream && sums instanceof HdfsDataInputStream))
+      throw new IllegalArgumentException(
+          "Input streams must be types of HdfsDataInputStream");
     
     LocatedBlock lblocks[] = new LocatedBlock[2];
 
     // Find block in data stream.
-    DFSClient.DFSDataInputStream dfsIn = (DFSClient.DFSDataInputStream) in;
+    HdfsDataInputStream dfsIn = (HdfsDataInputStream) in;
     ExtendedBlock dataBlock = dfsIn.getCurrentBlock();
     if (dataBlock == null) {
       LOG.error("Error: Current block in data stream is null! ");
@@ -647,7 +661,7 @@ public class DistributedFileSystem extends FileSystem {
         + dataNode[0]);
 
     // Find block in checksum stream
-    DFSClient.DFSDataInputStream dfsSums = (DFSClient.DFSDataInputStream) sums;
+    HdfsDataInputStream dfsSums = (HdfsDataInputStream) sums;
     ExtendedBlock sumsBlock = dfsSums.getCurrentBlock();
     if (sumsBlock == null) {
       LOG.error("Error: Current block in checksum stream is null! ");

+ 31 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java

@@ -94,8 +94,8 @@ public class HftpFileSystem extends FileSystem
   protected UserGroupInformation ugi;
   private URI hftpURI;
 
-  protected InetSocketAddress nnAddr;
-  protected InetSocketAddress nnSecureAddr;
+  protected URI nnUri;
+  protected URI nnSecureUri;
 
   public static final String HFTP_TIMEZONE = "UTC";
   public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";
@@ -139,11 +139,30 @@ public class HftpFileSystem extends FileSystem
     return NetUtils.createSocketAddrForHost(uri.getHost(), getDefaultSecurePort());
   }
 
+  protected URI getNamenodeUri(URI uri) {
+    return DFSUtil.createUri("http", getNamenodeAddr(uri));
+  }
+
+  protected URI getNamenodeSecureUri(URI uri) {
+    return DFSUtil.createUri("https", getNamenodeSecureAddr(uri));
+  }
+
   @Override
   public String getCanonicalServiceName() {
     // unlike other filesystems, hftp's service is the secure port, not the
     // actual port in the uri
-    return SecurityUtil.buildTokenService(nnSecureAddr).toString();
+    return SecurityUtil.buildTokenService(nnSecureUri).toString();
+  }
+
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>hftp</code>
+   */
+  @Override
+  public String getScheme() {
+    return "hftp";
   }
 
   @Override
@@ -152,8 +171,8 @@ public class HftpFileSystem extends FileSystem
     super.initialize(name, conf);
     setConf(conf);
     this.ugi = UserGroupInformation.getCurrentUser(); 
-    this.nnAddr = getNamenodeAddr(name);
-    this.nnSecureAddr = getNamenodeSecureAddr(name);
+    this.nnUri = getNamenodeUri(name);
+    this.nnSecureUri = getNamenodeSecureUri(name);
     try {
       this.hftpURI = new URI(name.getScheme(), name.getAuthority(),
                              null, null, null);
@@ -168,7 +187,7 @@ public class HftpFileSystem extends FileSystem
 
   protected void initDelegationToken() throws IOException {
     // look for hftp token, then try hdfs
-    Token<?> token = selectDelegationToken();
+    Token<?> token = selectDelegationToken(ugi);
 
     // if we don't already have a token, go get one over https
     boolean createdToken = false;
@@ -189,8 +208,9 @@ public class HftpFileSystem extends FileSystem
     }
   }
 
-  protected Token<DelegationTokenIdentifier> selectDelegationToken() {
-  	return hftpTokenSelector.selectToken(getUri(), ugi.getTokens(), getConf());
+  protected Token<DelegationTokenIdentifier> selectDelegationToken(
+      UserGroupInformation ugi) {
+  	return hftpTokenSelector.selectToken(nnSecureUri, ugi.getTokens(), getConf());
   }
   
 
@@ -221,7 +241,7 @@ public class HftpFileSystem extends FileSystem
       ugi.reloginFromKeytab();
       return ugi.doAs(new PrivilegedExceptionAction<Token<?>>() {
         public Token<?> run() throws IOException {
-          final String nnHttpUrl = DFSUtil.createUri("https", nnSecureAddr).toString();
+          final String nnHttpUrl = nnSecureUri.toString();
           Credentials c;
           try {
             c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer);
@@ -263,8 +283,8 @@ public class HftpFileSystem extends FileSystem
    * @throws IOException on error constructing the URL
    */
   protected URL getNamenodeURL(String path, String query) throws IOException {
-    final URL url = new URL("http", nnAddr.getHostName(),
-          nnAddr.getPort(), path + '?' + query);
+    final URL url = new URL("http", nnUri.getHost(),
+          nnUri.getPort(), path + '?' + query);
     if (LOG.isTraceEnabled()) {
       LOG.trace("url=" + url);
     }

+ 18 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java

@@ -58,6 +58,17 @@ public class HsftpFileSystem extends HftpFileSystem {
   private static final long MM_SECONDS_PER_DAY = 1000 * 60 * 60 * 24;
   private volatile int ExpWarnDays = 0;
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>hsftp</code>
+   */
+  @Override
+  public String getScheme() {
+    return "hsftp";
+  }
+
   @Override
   public void initialize(URI name, Configuration conf) throws IOException {
     super.initialize(name, conf);
@@ -132,12 +143,17 @@ public class HsftpFileSystem extends HftpFileSystem {
     return getNamenodeAddr(uri);
   }
 
+  @Override
+  protected URI getNamenodeUri(URI uri) {
+    return getNamenodeSecureUri(uri);
+  }
+  
   @Override
   protected HttpURLConnection openConnection(String path, String query)
       throws IOException {
     query = addDelegationTokenParam(query);
-    final URL url = new URL("https", nnAddr.getHostName(), 
-        nnAddr.getPort(), path + '?' + query);
+    final URL url = new URL("https", nnUri.getHost(), 
+        nnUri.getPort(), path + '?' + query);
     HttpsURLConnection conn = (HttpsURLConnection)URLUtils.openConnection(url);
     // bypass hostname verification
     conn.setHostnameVerifier(new DummyHostnameVerifier());
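
HftpFileSystem now tracks its namenode endpoints as http/https URIs instead of raw socket addresses, and HsftpFileSystem simply routes getNamenodeUri() to the secure variant. A rough sketch of how such a URI is assembled, reusing the DFSUtil.createUri and NetUtils.createSocketAddrForHost calls visible in the diff (the class name and default port below are assumptions for illustration only):

import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.net.NetUtils;

// Sketch: build an http namenode URI from a filesystem URI's host, mirroring the
// getNamenodeUri() helper above. The fallback port used here is an assumption.
class NamenodeUriSketch {
  static URI httpNamenodeUri(URI fsUri) {
    InetSocketAddress addr =
        NetUtils.createSocketAddrForHost(fsUri.getHost(), 50070);
    return DFSUtil.createUri("http", addr); // e.g. http://<host>:50070
  }
}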

+ 71 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java

@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+
+/**
+ * The Hdfs implementation of {@link FSDataInputStream}.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HdfsDataInputStream extends FSDataInputStream {
+  public HdfsDataInputStream(DFSInputStream in) throws IOException {
+    super(in);
+  }
+
+  /**
+   * Get the datanode from which the stream is currently reading.
+   */
+  public DatanodeInfo getCurrentDatanode() {
+    return ((DFSInputStream) in).getCurrentDatanode();
+  }
+
+  /**
+   * Get the block containing the target position.
+   */
+  public ExtendedBlock getCurrentBlock() {
+    return ((DFSInputStream) in).getCurrentBlock();
+  }
+
+  /**
+   * Get the collection of blocks that has already been located.
+   */
+  public synchronized List<LocatedBlock> getAllBlocks() throws IOException {
+    return ((DFSInputStream) in).getAllBlocks();
+  }
+
+  /**
+   * Get the visible length of the file. It will include the length of the last
+   * block even if that is in UnderConstruction state.
+   * 
+   * @return The visible length of the file.
+   */
+  public long getVisibleLength() throws IOException {
+    return ((DFSInputStream) in).getFileLength();
+  }
+}
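
A small usage sketch for the new HdfsDataInputStream; the file path is hypothetical, and the instanceof check reflects that only streams opened through HDFS carry this type:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

// Illustrative only: assumes the default FileSystem is HDFS and the file exists.
public class HdfsReadInfo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataInputStream in = fs.open(new Path("/tmp/example.txt"));
    in.read(); // trigger block selection before asking for read-side details
    if (in instanceof HdfsDataInputStream) {
      HdfsDataInputStream hin = (HdfsDataInputStream) in;
      System.out.println("current datanode: " + hin.getCurrentDatanode());
      System.out.println("visible length:   " + hin.getVisibleLength());
    }
    in.close();
  }
}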

+ 59 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSOutputStream;
+
+/**
+ * The Hdfs implementation of {@link FSDataOutputStream}.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HdfsDataOutputStream extends FSDataOutputStream {
+  public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats,
+      long startPosition) throws IOException {
+    super(out, stats, startPosition);
+  }
+
+  public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats
+      ) throws IOException {
+    this(out, stats, 0L);
+  }
+
+  /**
+   * Get the actual number of replicas of the current block.
+   * 
+   * This can be different from the designated replication factor of the file
+   * because the namenode does not maintain replication for the blocks which are
+   * currently being written to. Depending on the configuration, the client may
+   * continue to write to a block even if a few datanodes in the write pipeline
+   * have failed, or the client may add a new datanodes once a datanode has
+   * failed.
+   * 
+   * @return the number of valid replicas of the current block
+   */
+  public synchronized int getCurrentBlockReplication() throws IOException {
+    return ((DFSOutputStream)getWrappedStream()).getCurrentBlockReplication();
+  }
+}
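
Similarly, a sketch of querying the live pipeline width through HdfsDataOutputStream; the path is hypothetical and the cast assumes the stream came from DistributedFileSystem.create() as changed above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

// Illustrative only: assumes the default FileSystem is HDFS.
public class HdfsWriteInfo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataOutputStream out = fs.create(new Path("/tmp/example.txt"), true);
    out.write(new byte[] {1, 2, 3});
    if (out instanceof HdfsDataOutputStream) {
      // May be lower than the file's replication factor while the block is open.
      int live = ((HdfsDataOutputStream) out).getCurrentBlockReplication();
      System.out.println("current block replication: " + live);
    }
    out.close();
  }
}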

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java

@@ -46,7 +46,6 @@ import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -193,7 +192,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
   @Override
   public boolean isMethodSupported(String methodName) throws IOException {
     return RpcClientUtil.isMethodSupported(rpcProxy,
-        ClientDatanodeProtocolPB.class, RpcKind.RPC_PROTOCOL_BUFFER,
+        ClientDatanodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
         RPC.getProtocolVersion(ClientDatanodeProtocolPB.class), methodName);
   }
 

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -109,7 +109,6 @@ import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.Token;
 
@@ -812,7 +811,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   @Override
   public boolean isMethodSupported(String methodName) throws IOException {
     return RpcClientUtil.isMethodSupported(rpcProxy,
-        ClientNamenodeProtocolPB.class, RpcKind.RPC_PROTOCOL_BUFFER,
+        ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
         RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), methodName);
   }
 

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java

@@ -69,7 +69,6 @@ import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcClientUtil;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -308,7 +307,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
   public boolean isMethodSupported(String methodName)
       throws IOException {
     return RpcClientUtil.isMethodSupported(rpcProxy, DatanodeProtocolPB.class,
-        RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.RpcKind.RPC_PROTOCOL_BUFFER,
         RPC.getProtocolVersion(DatanodeProtocolPB.class), methodName);
   }
 }

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java

@@ -26,7 +26,6 @@ import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 
 import com.google.protobuf.RpcController;
@@ -65,7 +64,7 @@ public class GetUserMappingsProtocolClientSideTranslatorPB implements
   @Override
   public boolean isMethodSupported(String methodName) throws IOException {
     return RpcClientUtil.isMethodSupported(rpcProxy,
-        GetUserMappingsProtocolPB.class, RpcKind.RPC_PROTOCOL_BUFFER,
+        GetUserMappingsProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
         RPC.getProtocolVersion(GetUserMappingsProtocolPB.class), methodName);
   }
 }

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java

@@ -39,7 +39,6 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.security.UserGroupInformation;
 
 import com.google.protobuf.RpcController;
@@ -119,7 +118,7 @@ public class InterDatanodeProtocolTranslatorPB implements
   @Override
   public boolean isMethodSupported(String methodName) throws IOException {
     return RpcClientUtil.isMethodSupported(rpcProxy,
-        InterDatanodeProtocolPB.class, RpcKind.RPC_PROTOCOL_BUFFER,
+        InterDatanodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
         RPC.getProtocolVersion(InterDatanodeProtocolPB.class), methodName);
   }
 }

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java

@@ -33,7 +33,6 @@ import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -109,7 +108,7 @@ public class JournalProtocolTranslatorPB implements ProtocolMetaInterface,
   @Override
   public boolean isMethodSupported(String methodName) throws IOException {
     return RpcClientUtil.isMethodSupported(rpcProxy, JournalProtocolPB.class,
-        RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.RpcKind.RPC_PROTOCOL_BUFFER,
         RPC.getProtocolVersion(JournalProtocolPB.class), methodName);
   }
 }
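
The protocol-translator changes above all make the same substitution: the RpcKind enum is now referenced as RPC.RpcKind and the RpcPayloadHeader import is dropped. A hedged sketch of the shared isMethodSupported() call shape, with the protocol class and proxy passed in as placeholders (the helper class itself is hypothetical):

import java.io.IOException;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;

// Sketch only: protocolPB stands in for one of the *ProtocolPB interfaces and
// rpcProxy for the corresponding protobuf RPC proxy object.
class MethodSupportSketch {
  static boolean isSupported(Object rpcProxy, Class<?> protocolPB, String methodName)
      throws IOException {
    return RpcClientUtil.isMethodSupported(rpcProxy, protocolPB,
        RPC.RpcKind.RPC_PROTOCOL_BUFFER,
        RPC.getProtocolVersion(protocolPB), methodName);
  }
}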

Some files were not shown because too many files changed in this diff