
Merge r1329944 through r1332459 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3092@1332461 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 13 years ago
parent commit 5747161adb
99 changed files with 2510 additions and 474 deletions
  1. BUILDING.txt (+4, -0)
  2. dev-support/test-patch.sh (+14, -2)
  3. hadoop-common-project/hadoop-common/CHANGES.txt (+28, -0)
  4. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java (+23, -0)
  5. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java (+9, -6)
  6. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java (+10, -4)
  7. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java (+60, -9)
  8. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java (+9, -10)
  9. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java (+12, -1)
 10. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java (+12, -1)
 11. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java (+11, -0)
 12. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KosmosFileSystem.java (+11, -0)
 13. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java (+11, -0)
 14. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java (+12, -1)
 15. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java (+11, -0)
 16. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java (+60, -77)
 17. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java (+2, -2)
 18. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java (+3, -5)
 19. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java (+13, -2)
 20. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java (+10, -2)
 21. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java (+181, -0)
 22. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java (+14, -6)
 23. hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem (+22, -0)
 24. hadoop-common-project/hadoop-common/src/main/resources/core-default.xml (+0, -70)
 25. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java (+22, -0)
 26. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java (+176, -0)
 27. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java (+3, -3)
 28. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java (+7, -7)
 29. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java (+4, -1)
 30. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java (+13, -0)
 31. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java (+96, -0)
 32. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java (+1, -1)
 33. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java (+13, -0)
 34. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java (+62, -0)
 35. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java (+3, -5)
 36. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java (+1, -1)
 37. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java (+15, -5)
 38. hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (+43, -4)
 39. hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/README.txt (+13, -7)
 40. hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml (+46, -0)
 41. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java (+7, -4)
 42. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java (+71, -31)
 43. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (+11, -11)
 44. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (+39, -4)
 45. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (+41, -23)
 46. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (+29, -18)
 47. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java (+11, -0)
 48. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java (+11, -0)
 49. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java (+59, -0)
 50. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (+0, -1)
 51. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (+5, -6)
 52. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (+21, -3)
 53. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (+2, -6)
 54. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java (+25, -0)
 55. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (+11, -0)
 56. hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4 (+1, -1)
 57. hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem (+19, -0)
 58. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (+3, -4)
 59. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (+4, -5)
 60. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java (+1, -1)
 61. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java (+78, -0)
 62. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java (+5, -4)
 63. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (+15, -15)
 64. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java (+94, -0)
 65. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java (+40, -0)
 66. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (+37, -13)
 67. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java (+2, -2)
 68. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java (+4, -4)
 69. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java (+1, -1)
 70. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (+2, -2)
 71. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java (+2, -6)
 72. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (+2, -1)
 73. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java (+11, -0)
 74. hadoop-mapreduce-project/CHANGES.txt (+27, -0)
 75. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java (+21, -2)
 76. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java (+2, -18)
 77. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Counters.java (+2, -2)
 78. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java (+2, -3)
 79. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java (+6, -8)
 80. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java (+19, -0)
 81. hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml (+1, -0)
 82. hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml (+1, -0)
 83. hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java (+11, -2)
 84. hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml (+33, -2)
 85. hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/xsl/configuration.xsl (+37, -0)
 86. hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java (+9, -16)
 87. hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java (+7, -2)
 88. hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java (+15, -1)
 89. hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java (+68, -0)
 90. hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java (+8, -5)
 91. hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java (+15, -0)
 92. hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java (+3, -1)
 93. hadoop-project/src/site/site.xml (+1, -1)
 94. hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java (+1, -1)
 95. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java (+6, -6)
 96. hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCp.java (+2, -2)
 97. hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpViewFs.java (+485, -0)
 98. hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java (+2, -2)
 99. hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCp.java (+2, -2)

+ 4 - 0
BUILDING.txt

@@ -87,4 +87,8 @@ Create source and binary distributions with native code and documentation:
 
   $ mvn package -Pdist,native,docs,src -DskipTests -Dtar
 
+Create a local staging version of the website (in /tmp/hadoop-site)
+
+  $ mvn clean site; mvn site:stage -DstagingDirectory=/tmp/hadoop-site
+
 ----------------------------------------------------------------------------------

+ 14 - 2
dev-support/test-patch.sh

@@ -39,6 +39,7 @@ WGET=${WGET:-wget}
 SVN=${SVN:-svn}
 GREP=${GREP:-grep}
 PATCH=${PATCH:-patch}
+DIFF=${DIFF:-diff}
 JIRACLI=${JIRA:-jira}
 FINDBUGS_HOME=${FINDBUGS_HOME}
 FORREST_HOME=${FORREST_HOME}
@@ -61,6 +62,7 @@ printUsage() {
   echo "--svn-cmd=<cmd>        The 'svn' command to use (default 'svn')"
   echo "--grep-cmd=<cmd>       The 'grep' command to use (default 'grep')"
   echo "--patch-cmd=<cmd>      The 'patch' command to use (default 'patch')"
+  echo "--diff-cmd=<cmd>       The 'diff' command to use (default 'diff')"
   echo "--findbugs-home=<path> Findbugs home directory (default FINDBUGS_HOME environment variable)"
   echo "--forrest-home=<path>  Forrest home directory (default FORREST_HOME environment variable)"
   echo "--dirty-workspace      Allow the local SVN workspace to have uncommitted changes"
@@ -113,6 +115,9 @@ parseArgs() {
     --patch-cmd=*)
       PATCH=${i#*=}
       ;;
+    --diff-cmd=*)
+      DIFF=${i#*=}
+      ;;
     --jira-cmd=*)
       JIRACLI=${i#*=}
       ;;
@@ -430,14 +435,21 @@ checkJavacWarnings () {
   fi
   ### Compare trunk and patch javac warning numbers
   if [[ -f $PATCH_DIR/patchJavacWarnings.txt ]] ; then
-    trunkJavacWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/trunkJavacWarnings.txt | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
-    patchJavacWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavacWarnings.txt | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
+    $GREP '\[WARNING\]' $PATCH_DIR/trunkJavacWarnings.txt > $PATCH_DIR/filteredTrunkJavacWarnings.txt
+    $GREP '\[WARNING\]' $PATCH_DIR/patchJavacWarnings.txt > $PATCH_DIR/filteredPatchJavacWarnings.txt
+    trunkJavacWarnings=`cat $PATCH_DIR/filteredTrunkJavacWarnings.txt | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
+    patchJavacWarnings=`cat $PATCH_DIR/filteredPatchJavacWarnings.txt | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
     echo "There appear to be $trunkJavacWarnings javac compiler warnings before the patch and $patchJavacWarnings javac compiler warnings after applying the patch."
     if [[ $patchJavacWarnings != "" && $trunkJavacWarnings != "" ]] ; then
       if [[ $patchJavacWarnings -gt $trunkJavacWarnings ]] ; then
         JIRA_COMMENT="$JIRA_COMMENT

     -1 javac.  The applied patch generated $patchJavacWarnings javac compiler warnings (more than the trunk's current $trunkJavacWarnings warnings)."
+
+    $DIFF $PATCH_DIR/filteredTrunkJavacWarnings.txt $PATCH_DIR/filteredPatchJavacWarnings.txt > $PATCH_DIR/diffJavacWarnings.txt 
+        JIRA_COMMENT_FOOTER="Javac warnings: $BUILD_URL/artifact/trunk/$(basename $BASEDIR)/patchprocess/diffJavacWarnings.txt
+$JIRA_COMMENT_FOOTER"
+
         return 1
       fi
     fi

+ 28 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -121,6 +121,9 @@ Trunk (unreleased changes)

     HADOOP-7788. Add simple HealthMonitor class to watch an HAService (todd)

+    HADOOP-8312. testpatch.sh should provide a simpler way to see which
+    warnings changed (bobby)
+
   OPTIMIZATIONS

     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -269,6 +272,8 @@ Release 2.0.0 - UNRELEASED

     HADOOP-8152. Expand public APIs for security library classes. (atm via eli)

+    HADOOP-7549. Use JDK ServiceLoader mechanism to find FileSystem implementations. (tucu)
+
   OPTIMIZATIONS

   BUG FIXES
@@ -373,6 +378,18 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8309. Pseudo & Kerberos AuthenticationHandler should use 
     getType() to create token (tucu)

+    HADOOP-8314. HttpServer#hasAdminAccess should return false if 
+    authorization is enabled but user is not authenticated. (tucu)
+
+    HADOOP-8296. hadoop/yarn daemonlog usage wrong (Devaraj K via tgraves)
+
+    HADOOP-8310. FileContext#checkPath should handle URIs with no port. (atm)
+
+    HADOOP-8321. TestUrlStreamHandler fails. (tucu)
+
+    HADOOP-8325. Add a ShutdownHookManager to be used by different
+    components instead of the JVM shutdownhook (tucu)
+
   BREAKDOWN OF HADOOP-7454 SUBTASKS

     HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
@@ -469,6 +486,17 @@ Release 0.23.3 - UNRELEASED

     HADOOP-8227. Allow RPC to limit ephemeral port range. (bobby)

+    HADOOP-8305. distcp over viewfs is broken (John George via bobby)
+
+    HADOOP-8334. HttpServer sometimes returns incorrect port (Daryn Sharp via
+    bobby)
+
+    HADOOP-8330. Update TestSequenceFile.testCreateUsesFsArg() for HADOOP-8305.
+    (John George via szetszwo)
+
+    HADOOP-8335. Improve Configuration's address handling (Daryn Sharp via
+    bobby)
+
 Release 0.23.2 - UNRELEASED

   INCOMPATIBLE CHANGES

+ 23 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -1236,6 +1236,29 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     final String address = get(name, defaultAddress);
     return NetUtils.createSocketAddr(address, defaultPort, name);
   }
+
+  /**
+   * Set the socket address for the <code>name</code> property as
+   * a <code>host:port</code>.
+   */
+  public void setSocketAddr(String name, InetSocketAddress addr) {
+    set(name, NetUtils.getHostPortString(addr));
+  }
+  
+  /**
+   * Set the socket address a client can use to connect for the
+   * <code>name</code> property as a <code>host:port</code>.  The wildcard
+   * address is replaced with the local host's address.
+   * @param name property name.
+   * @param addr InetSocketAddress of a listener to store in the given property
+   * @return InetSocketAddress for clients to connect
+   */
+  public InetSocketAddress updateConnectAddr(String name,
+                                             InetSocketAddress addr) {
+    final InetSocketAddress connectAddr = NetUtils.getConnectAddress(addr);
+    setSocketAddr(name, connectAddr);
+    return connectAddr;
+  }
   
   /**
    * Load a class by name.
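
A minimal sketch of how the two helpers added above might be used together. Only setSocketAddr and updateConnectAddr come from this patch; the property name and port below are invented for illustration.

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;

    public class SocketAddrSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Record a listener address under an illustrative property name.
        InetSocketAddress bindAddr = new InetSocketAddress("0.0.0.0", 50070);
        conf.setSocketAddr("example.http.address", bindAddr);

        // Swap the wildcard host for the local host and write the result
        // back, so clients reading the property get a dialable address.
        InetSocketAddress connectAddr =
            conf.updateConnectAddr("example.http.address", bindAddr);
        System.out.println("clients connect to " + connectAddr);
      }
    }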

+ 9 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -350,20 +350,23 @@ public abstract class AbstractFileSystem {
       }
     }
     String thisScheme = this.getUri().getScheme();
-    String thisAuthority = this.getUri().getAuthority();
+    String thisHost = this.getUri().getHost();
+    String thatHost = uri.getHost();
     
-    // Schemes and authorities must match.
+    // Schemes and hosts must match.
     // Allow for null Authority for file:///
     if (!thisScheme.equalsIgnoreCase(thatScheme) ||
-       (thisAuthority != null && 
-            !thisAuthority.equalsIgnoreCase(thatAuthority)) ||
-       (thisAuthority == null && thatAuthority != null)) {
+       (thisHost != null && 
+            !thisHost.equalsIgnoreCase(thatHost)) ||
+       (thisHost == null && thatHost != null)) {
       throw new InvalidPathException("Wrong FS: " + path + ", expected: "
           + this.getUri());
     }
     
+    // Ports must match, unless this FS instance is using the default port, in
+    // which case the port may be omitted from the given URI
     int thisPort = this.getUri().getPort();
-    int thatPort = path.toUri().getPort();
+    int thatPort = uri.getPort();
     if (thatPort == -1) { // -1 => defaultPort of Uri scheme
       thatPort = this.getUriDefaultPort();
     }
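
The net effect is that a path URI may omit the port when the filesystem is running on its scheme's default port. A simplified, hedged restatement of the new host/port comparison (scheme check omitted; the hdfs URIs and the 8020 default are examples only, not taken from the patch):

    import java.net.URI;

    public class CheckPathSketch {
      // Hosts must match; a missing port (-1) falls back to the default port.
      static boolean samePortAndHost(URI fsUri, URI pathUri, int defaultPort) {
        String thisHost = fsUri.getHost();
        String thatHost = pathUri.getHost();
        if (thisHost == null ? thatHost != null
                             : !thisHost.equalsIgnoreCase(thatHost)) {
          return false;
        }
        int thisPort = fsUri.getPort() == -1 ? defaultPort : fsUri.getPort();
        int thatPort = pathUri.getPort() == -1 ? defaultPort : pathUri.getPort();
        return thisPort == thatPort;
      }

      public static void main(String[] args) {
        URI fs = URI.create("hdfs://namenode:8020");
        System.out.println(samePortAndHost(fs, URI.create("hdfs://namenode/d"), 8020));      // true
        System.out.println(samePortAndHost(fs, URI.create("hdfs://namenode:9000/d"), 8020)); // false
      }
    }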

+ 10 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.ShutdownHookManager;
 
 /**
  * The FileContext class provides an interface to the application writer for
@@ -171,7 +172,12 @@ public final class FileContext {
   
   public static final Log LOG = LogFactory.getLog(FileContext.class);
   public static final FsPermission DEFAULT_PERM = FsPermission.getDefault();
-  
+
+  /**
+   * Priority of the FileContext shutdown hook.
+   */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 20;
+
   /**
    * List of files that should be deleted on JVM shutdown.
    */
@@ -1456,8 +1462,8 @@ public final class FileContext {
       return false;
     }
     synchronized (DELETE_ON_EXIT) {
-      if (DELETE_ON_EXIT.isEmpty() && !FINALIZER.isAlive()) {
-        Runtime.getRuntime().addShutdownHook(FINALIZER);
+      if (DELETE_ON_EXIT.isEmpty()) {
+        ShutdownHookManager.get().addShutdownHook(FINALIZER, SHUTDOWN_HOOK_PRIORITY);
       }
       
       Set<Path> set = DELETE_ON_EXIT.get(this);
@@ -2215,7 +2221,7 @@ public final class FileContext {
   /**
    * Deletes all the paths in deleteOnExit on JVM shutdown.
    */
-  static class FileContextFinalizer extends Thread {
+  static class FileContextFinalizer implements Runnable {
     public synchronized void run() {
       processDeleteOnExit();
     }

+ 60 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -32,6 +32,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.NoSuchElementException;
+import java.util.ServiceLoader;
 import java.util.Set;
 import java.util.Stack;
 import java.util.TreeSet;
@@ -54,6 +55,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.ShutdownHookManager;
 
 /****************************************************************
  * An abstract base class for a fairly generic filesystem.  It
@@ -83,6 +85,11 @@ public abstract class FileSystem extends Configured implements Closeable {

   public static final Log LOG = LogFactory.getLog(FileSystem.class);

+  /**
+   * Priority of the FileSystem shutdown hook.
+   */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 10;
+
   /** FileSystem cache */
   static final Cache CACHE = new Cache();

@@ -184,6 +191,17 @@ public abstract class FileSystem extends Configured implements Closeable {
     statistics = getStatistics(name.getScheme(), getClass());    
   }

+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   * This implementation throws an <code>UnsupportedOperationException</code>.
+   *
+   * @return the protocol scheme for the FileSystem.
+   */
+  public String getScheme() {
+    throw new UnsupportedOperationException("Not implemented by  the FileSystem implementation");
+  }
+
   /** Returns a URI whose scheme and authority identify this FileSystem.*/
   public abstract URI getUri();
   
@@ -2078,9 +2096,45 @@ public abstract class FileSystem extends Configured implements Closeable {
       ) throws IOException {
   }

+  // making it volatile to be able to do a double checked locking
+  private volatile static boolean FILE_SYSTEMS_LOADED = false;
+
+  private static final Map<String, Class<? extends FileSystem>>
+    SERVICE_FILE_SYSTEMS = new HashMap<String, Class<? extends FileSystem>>();
+
+  private static void loadFileSystems() {
+    synchronized (FileSystem.class) {
+      if (!FILE_SYSTEMS_LOADED) {
+        ServiceLoader<FileSystem> serviceLoader = ServiceLoader.load(FileSystem.class);
+        for (FileSystem fs : serviceLoader) {
+          SERVICE_FILE_SYSTEMS.put(fs.getScheme(), fs.getClass());
+        }
+        FILE_SYSTEMS_LOADED = true;
+      }
+    }
+  }
+
+  public static Class<? extends FileSystem> getFileSystemClass(String scheme,
+      Configuration conf) throws IOException {
+    if (!FILE_SYSTEMS_LOADED) {
+      loadFileSystems();
+    }
+    Class<? extends FileSystem> clazz = null;
+    if (conf != null) {
+      clazz = (Class<? extends FileSystem>) conf.getClass("fs." + scheme + ".impl", null);
+    }
+    if (clazz == null) {
+      clazz = SERVICE_FILE_SYSTEMS.get(scheme);
+    }
+    if (clazz == null) {
+      throw new IOException("No FileSystem for scheme: " + scheme);
+    }
+    return clazz;
+  }
+
   private static FileSystem createFileSystem(URI uri, Configuration conf
       ) throws IOException {
-    Class<?> clazz = conf.getClass("fs." + uri.getScheme() + ".impl", null);
+    Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
     if (clazz == null) {
       throw new IOException("No FileSystem for scheme: " + uri.getScheme());
     }
@@ -2128,8 +2182,8 @@ public abstract class FileSystem extends Configured implements Closeable {
         }
         
         // now insert the new file system into the map
-        if (map.isEmpty() && !clientFinalizer.isAlive()) {
-          Runtime.getRuntime().addShutdownHook(clientFinalizer);
+        if (map.isEmpty() ) {
+          ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY);
         }
         fs.key = key;
         map.put(key, fs);
@@ -2144,11 +2198,8 @@ public abstract class FileSystem extends Configured implements Closeable {
       if (map.containsKey(key) && fs == map.get(key)) {
         map.remove(key);
         toAutoClose.remove(key);
-        if (map.isEmpty() && !clientFinalizer.isAlive()) {
-          if (!Runtime.getRuntime().removeShutdownHook(clientFinalizer)) {
-            LOG.info("Could not cancel cleanup thread, though no " +
-                     "FileSystems are open");
-          }
+        if (map.isEmpty()) {
+          ShutdownHookManager.get().removeShutdownHook(clientFinalizer);
         }
       }
     }
@@ -2194,7 +2245,7 @@ public abstract class FileSystem extends Configured implements Closeable {
       }
     }

-    private class ClientFinalizer extends Thread {
+    private class ClientFinalizer implements Runnable {
       public synchronized void run() {
         try {
           closeAll(true);
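
With the changes above, getFileSystemClass first honors an explicit fs.<scheme>.impl setting and only then falls back to implementations discovered through the JDK ServiceLoader. A small sketch of the lookup; the file scheme is just a convenient example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class FileSystemLookupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolved from fs.file.impl if configured, otherwise from the
        // META-INF/services/org.apache.hadoop.fs.FileSystem entries.
        Class<? extends FileSystem> clazz =
            FileSystem.getFileSystemClass("file", conf);
        System.out.println("file:// is served by " + clazz.getName());
      }
    }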

+ 9 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs;

+import java.io.IOException;
 import java.net.URLStreamHandlerFactory;
 import java.util.HashMap;
 import java.util.Map;
@@ -50,25 +51,23 @@ public class FsUrlStreamHandlerFactory implements
   private java.net.URLStreamHandler handler;

   public FsUrlStreamHandlerFactory() {
-    this.conf = new Configuration();
-    // force the resolution of the configuration files
-    // this is required if we want the factory to be able to handle
-    // file:// URLs
-    this.conf.getClass("fs.file.impl", null);
-    this.handler = new FsUrlStreamHandler(this.conf);
+    this(new Configuration());
   }

   public FsUrlStreamHandlerFactory(Configuration conf) {
     this.conf = new Configuration(conf);
-    // force the resolution of the configuration files
-    this.conf.getClass("fs.file.impl", null);
     this.handler = new FsUrlStreamHandler(this.conf);
   }

   public java.net.URLStreamHandler createURLStreamHandler(String protocol) {
     if (!protocols.containsKey(protocol)) {
-      boolean known =
-          (conf.getClass("fs." + protocol + ".impl", null) != null);
+      boolean known = true;
+      try {
+        FileSystem.getFileSystemClass(protocol, conf);
+      }
+      catch (IOException ex) {
+        known = false;
+      }
       protocols.put(protocol, known);
     }
     if (protocols.get(protocol)) {

+ 12 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -71,7 +71,18 @@ public class HarFileSystem extends FilterFileSystem {
    */
   public HarFileSystem() {
   }
-  
+
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>har</code>
+   */
+  @Override
+  public String getScheme() {
+    return "har";
+  }
+
   /**
    * Constructor to create a HarFileSystem with an
    * underlying filesystem.

+ 12 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -39,7 +39,18 @@ public class LocalFileSystem extends ChecksumFileSystem {
   public LocalFileSystem() {
     this(new RawLocalFileSystem());
   }
-  
+
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>file</code>
+   */
+  @Override
+  public String getScheme() {
+    return "file";
+  }
+
   public FileSystem getRaw() {
     return getRawFileSystem();
   }
   }

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java

@@ -59,6 +59,17 @@ public class FTPFileSystem extends FileSystem {

   private URI uri;

+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>ftp</code>
+   */
+  @Override
+  public String getScheme() {
+    return "ftp";
+  }
+
   @Override
   public void initialize(URI uri, Configuration conf) throws IOException { // get
     super.initialize(uri, conf);

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KosmosFileSystem.java

@@ -57,6 +57,17 @@ public class KosmosFileSystem extends FileSystem {
         this.kfsImpl = fsimpl;
     }

+    /**
+     * Return the protocol scheme for the FileSystem.
+     * <p/>
+     *
+     * @return <code>kfs</code>
+     */
+    @Override
+    public String getScheme() {
+      return "kfs";
+    }
+
     @Override
     public URI getUri() {
 	return uri;

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java

@@ -67,6 +67,17 @@ public class S3FileSystem extends FileSystem {
     this.store = store;
   }

+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>s3</code>
+   */
+  @Override
+  public String getScheme() {
+    return "s3";
+  }
+
   @Override
   public URI getUri() {
     return uri;

+ 12 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java

@@ -251,7 +251,18 @@ public class NativeS3FileSystem extends FileSystem {
   public NativeS3FileSystem(NativeFileSystemStore store) {
     this.store = store;
   }
-  
+
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>s3n</code>
+   */
+  @Override
+  public String getScheme() {
+    return "s3n";
+  }
+
   @Override
   public void initialize(URI uri, Configuration conf) throws IOException {
     super.initialize(uri, conf);

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -149,6 +149,17 @@ public class ViewFileSystem extends FileSystem {
     creationTime = System.currentTimeMillis();
   }

+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>viewfs</code>
+   */
+  @Override
+  public String getScheme() {
+    return "viewfs";
+  }
+
   /**
    * Called after a new FileSystem instance is constructed.
    * @param theUri a uri whose authority section names the host, port, etc. for

+ 60 - 77
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -636,80 +636,16 @@ public class HttpServer implements FilterContainer {
    */
   public void start() throws IOException {
     try {
-      if(listenerStartedExternally) { // Expect that listener was started securely
-        if(listener.getLocalPort() == -1) // ... and verify
-          throw new Exception("Exepected webserver's listener to be started " +
-             "previously but wasn't");
-        // And skip all the port rolling issues.
+      try {
+        openListener();
+        LOG.info("Jetty bound to port " + listener.getLocalPort());
         webServer.start();
-      } else {
-        int port = 0;
-        int oriPort = listener.getPort(); // The original requested port
-        while (true) {
-          try {
-            port = webServer.getConnectors()[0].getLocalPort();
-            LOG.debug("Port returned by webServer.getConnectors()[0]." +
-            		"getLocalPort() before open() is "+ port + 
-            		". Opening the listener on " + oriPort);
-            listener.open();
-            port = listener.getLocalPort();
-            LOG.debug("listener.getLocalPort() returned " + listener.getLocalPort() + 
-                  " webServer.getConnectors()[0].getLocalPort() returned " +
-                  webServer.getConnectors()[0].getLocalPort());
-            //Workaround to handle the problem reported in HADOOP-4744
-            if (port < 0) {
-              Thread.sleep(100);
-              int numRetries = 1;
-              while (port < 0) {
-                LOG.warn("listener.getLocalPort returned " + port);
-                if (numRetries++ > MAX_RETRIES) {
-                  throw new Exception(" listener.getLocalPort is returning " +
-                  		"less than 0 even after " +numRetries+" resets");
-                }
-                for (int i = 0; i < 2; i++) {
-                  LOG.info("Retrying listener.getLocalPort()");
-                  port = listener.getLocalPort();
-                  if (port > 0) {
-                    break;
-                  }
-                  Thread.sleep(200);
-                }
-                if (port > 0) {
-                  break;
-                }
-                LOG.info("Bouncing the listener");
-                listener.close();
-                Thread.sleep(1000);
-                listener.setPort(oriPort == 0 ? 0 : (oriPort += 1));
-                listener.open();
-                Thread.sleep(100);
-                port = listener.getLocalPort();
-              }
-            } //Workaround end
-            LOG.info("Jetty bound to port " + port);
-            webServer.start();
-            break;
-          } catch (IOException ex) {
-            // if this is a bind exception,
-            // then try the next port number.
-            if (ex instanceof BindException) {
-              if (!findPort) {
-                BindException be = new BindException(
-                        "Port in use: " + listener.getHost()
-                                + ":" + listener.getPort());
-                be.initCause(ex);
-                throw be;
-              }
-            } else {
-              LOG.info("HttpServer.start() threw a non Bind IOException"); 
-              throw ex;
-            }
-          } catch (MultiException ex) {
-            LOG.info("HttpServer.start() threw a MultiException"); 
-            throw ex;
-          }
-          listener.setPort((oriPort += 1));
-        }
+      } catch (IOException ex) {
+        LOG.info("HttpServer.start() threw a non Bind IOException", ex);
+        throw ex;
+      } catch (MultiException ex) {
+        LOG.info("HttpServer.start() threw a MultiException", ex);
+        throw ex;
       }
       // Make sure there is no handler failures.
       Handler[] handlers = webServer.getHandlers();
@@ -729,6 +665,52 @@ public class HttpServer implements FilterContainer {
     }
   }

+  /**
+   * Open the main listener for the server
+   * @throws Exception
+   */
+  void openListener() throws Exception {
+    if (listener.getLocalPort() != -1) { // it's already bound
+      return;
+    }
+    if (listenerStartedExternally) { // Expect that listener was started securely
+      throw new Exception("Expected webserver's listener to be started " +
+          "previously but wasn't");
+    }
+    int port = listener.getPort();
+    while (true) {
+      // jetty has a bug where you can't reopen a listener that previously
+      // failed to open w/o issuing a close first, even if the port is changed
+      try {
+        listener.close();
+        listener.open();
+        break;
+      } catch (BindException ex) {
+        if (port == 0 || !findPort) {
+          BindException be = new BindException(
+              "Port in use: " + listener.getHost() + ":" + listener.getPort());
+          be.initCause(ex);
+          throw be;
+        }
+      }
+      // try the next port number
+      listener.setPort(++port);
+      Thread.sleep(100);
+    }
+  }
+  
+  /**
+   * Return the bind address of the listener.
+   * @return InetSocketAddress of the listener
+   */
+  public InetSocketAddress getListenerAddress() {
+    int port = listener.getLocalPort();
+    if (port == -1) { // not bound, return requested port
+      port = listener.getPort();
+    }
+    return new InetSocketAddress(listener.getHost(), port);
+  }
+  
   /**
    * stop the server
    */
@@ -821,7 +803,10 @@ public class HttpServer implements FilterContainer {

     String remoteUser = request.getRemoteUser();
     if (remoteUser == null) {
-      return true;
+      response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
+                         "Unauthenticated users are not " +
+                         "authorized to access this page.");
+      return false;
     }
     AccessControlList adminsAcl = (AccessControlList) servletContext
         .getAttribute(ADMINS_ACL);
@@ -830,9 +815,7 @@ public class HttpServer implements FilterContainer {
     if (adminsAcl != null) {
       if (!adminsAcl.isUserAllowed(remoteUserUGI)) {
         response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
-            + remoteUser + " is unauthorized to access this page. "
-            + "AccessControlList for accessing this page : "
-            + adminsAcl.toString());
+            + remoteUser + " is unauthorized to access this page.");
         return false;
       }
     }
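
The new getListenerAddress makes it possible to record the port Jetty actually bound, which matters when findPort rolls past a busy port or an ephemeral port 0 is requested. A hedged sketch of how a daemon might combine it with Configuration.updateConnectAddr; the server name, the property name, and the assumption that a webapps/example resource directory exists on the classpath are all illustrative:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer;

    public class HttpServerPortSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Ask for an ephemeral port (0) and let openListener() bind it.
        // HttpServer expects a webapps/example directory on the classpath.
        HttpServer server = new HttpServer("example", "0.0.0.0", 0, true, conf);
        server.start();

        // Find out where the listener really ended up and publish a
        // client-usable address under an illustrative property name.
        InetSocketAddress bound = server.getListenerAddress();
        conf.updateConnectAddr("example.webapp.address", bound);
        System.out.println("HTTP server bound to " + bound);
        server.stop();
      }
    }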

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -1050,9 +1050,9 @@ public class SequenceFile {
         int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
           bufferSizeOption.getValue();
         short replication = replicationOption == null ? 
-          fs.getDefaultReplication() :
+          fs.getDefaultReplication(p) :
           (short) replicationOption.getValue();
-        long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize() :
+        long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
           blockSizeOption.getValue();
         Progressable progress = progressOption == null ? null :
           progressOption.getValue();

+ 3 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java

@@ -36,11 +36,9 @@ import org.apache.hadoop.util.ServletUtil;
  */
 @InterfaceStability.Evolving
 public class LogLevel {
-  public static final String USAGES = "\nUSAGES:\n"
-    + "java " + LogLevel.class.getName()
-    + " -getlevel <host:port> <name>\n"
-    + "java " + LogLevel.class.getName()
-    + " -setlevel <host:port> <name> <level>\n";
+  public static final String USAGES = "\nUsage: General options are:\n"
+      + "\t[-getlevel <host:httpPort> <name>]\n"
+      + "\t[-setlevel <host:httpPort> <name> <level>]\n";
 
 
   /**
    * A command line implementation

+ 13 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java

@@ -351,8 +351,19 @@ public class NetUtils {
    * @return socket address that a client can use to connect to the server.
    */
   public static InetSocketAddress getConnectAddress(Server server) {
-    InetSocketAddress addr = server.getListenerAddress();
-    if (addr.getAddress().isAnyLocalAddress()) {
+    return getConnectAddress(server.getListenerAddress());
+  }
+  
+  /**
+   * Returns the InetSocketAddress that a client can use to connect to the
+   * given listening address.  This returns "hostname:port" of the server,
+   * or "127.0.0.1:port" when given a wildcard address of "0.0.0.0:port".
+   * 
+   * @param addr of a listener
+   * @return socket address that a client can use to connect to the server.
+   */
+  public static InetSocketAddress getConnectAddress(InetSocketAddress addr) {
+    if (!addr.isUnresolved() && addr.getAddress().isAnyLocalAddress()) {
       try {
       try {
         addr = new InetSocketAddress(InetAddress.getLocalHost(), addr.getPort());
       } catch (UnknownHostException uhe) {
+ 10 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java

@@ -50,6 +50,11 @@ public class RunJar {
   /** Pattern that matches any string */
   public static final Pattern MATCH_ANY = Pattern.compile(".*");

+  /**
+   * Priority of the RunJar shutdown hook.
+   */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 10;
+
   /**
    * Unpack a jar file into a directory.
    *
@@ -167,11 +172,14 @@ public class RunJar {
     }
     ensureDirectory(workDir);

-    Runtime.getRuntime().addShutdownHook(new Thread() {
+    ShutdownHookManager.get().addShutdownHook(
+      new Runnable() {
+        @Override
         public void run() {
           FileUtil.fullyDelete(workDir);
         }
-      });
+      }, SHUTDOWN_HOOK_PRIORITY);
+

     unJar(file, workDir);


+ 181 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java

@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * The <code>ShutdownHookManager</code> enables running shutdownHook
+ * in a deterministic order, higher priority first.
+ * <p/>
+ * The JVM runs ShutdownHooks in a non-deterministic order or in parallel.
+ * This class registers a single JVM shutdownHook and run all the
+ * shutdownHooks registered to it (to this class) in order based on their
+ * priority.
+ */
+public class ShutdownHookManager {
+
+  private static final ShutdownHookManager MGR = new ShutdownHookManager();
+
+  private static final Log LOG = LogFactory.getLog(ShutdownHookManager.class);
+
+  static {
+    Runtime.getRuntime().addShutdownHook(
+      new Thread() {
+        @Override
+        public void run() {
+          MGR.shutdownInProgress.set(true);
+          for (Runnable hook: MGR.getShutdownHooksInOrder()) {
+            try {
+              hook.run();
+            } catch (Throwable ex) {
+              LOG.warn("ShutdownHook '" + hook.getClass().getSimpleName() +
+                       "' failed, " + ex.toString(), ex);
+            }
+          }
+        }
+      }
+    );
+  }
+
+  /**
+   * Return <code>ShutdownHookManager</code> singleton.
+   *
+   * @return <code>ShutdownHookManager</code> singleton.
+   */
+  public static ShutdownHookManager get() {
+    return MGR;
+  }
+
+  /**
+   * Private structure to store ShutdownHook and its priority.
+   */
+  private static class HookEntry {
+    Runnable hook;
+    int priority;
+
+    public HookEntry(Runnable hook, int priority) {
+      this.hook = hook;
+      this.priority = priority;
+    }
+
+    @Override
+    public int hashCode() {
+      return hook.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      boolean eq = false;
+      if (obj != null) {
+        if (obj instanceof HookEntry) {
+          eq = (hook == ((HookEntry)obj).hook);
+        }
+      }
+      return eq;
+    }
+
+  }
+
+  private Set<HookEntry> hooks =
+    Collections.synchronizedSet(new HashSet<HookEntry>());
+
+  private AtomicBoolean shutdownInProgress = new AtomicBoolean(false);
+
+  //private to constructor to ensure singularity
+  private ShutdownHookManager() {
+  }
+
+  /**
+   * Returns the list of shutdownHooks in order of execution,
+   * Highest priority first.
+   *
+   * @return the list of shutdownHooks in order of execution.
+   */
+  List<Runnable> getShutdownHooksInOrder() {
+    List<HookEntry> list;
+    synchronized (MGR.hooks) {
+      list = new ArrayList<HookEntry>(MGR.hooks);
+    }
+    Collections.sort(list, new Comparator<HookEntry>() {
+
+      //reversing comparison so highest priority hooks are first
+      @Override
+      public int compare(HookEntry o1, HookEntry o2) {
+        return o2.priority - o1.priority;
+      }
+    });
+    List<Runnable> ordered = new ArrayList<Runnable>();
+    for (HookEntry entry: list) {
+      ordered.add(entry.hook);
+    }
+    return ordered;
+  }
+
+  /**
+   * Adds a shutdownHook with a priority, the higher the priority
+   * the earlier will run. ShutdownHooks with same priority run
+   * in a non-deterministic order.
+   *
+   * @param shutdownHook shutdownHook <code>Runnable</code>
+   * @param priority priority of the shutdownHook.
+   */
+  public void addShutdownHook(Runnable shutdownHook, int priority) {
+    if (shutdownHook == null) {
+      throw new IllegalArgumentException("shutdownHook cannot be NULL");
+    }
+    if (shutdownInProgress.get()) {
+      throw new IllegalStateException("Shutdown in progress, cannot add a shutdownHook");
+    }
+    hooks.add(new HookEntry(shutdownHook, priority));
+  }
+
+  /**
+   * Removes a shutdownHook.
+   *
+   * @param shutdownHook shutdownHook to remove.
+   * @return TRUE if the shutdownHook was registered and removed,
+   * FALSE otherwise.
+   */
+  public boolean removeShutdownHook(Runnable shutdownHook) {
+    if (shutdownInProgress.get()) {
+      throw new IllegalStateException("Shutdown in progress, cannot remove a shutdownHook");
+    }
+    return hooks.remove(new HookEntry(shutdownHook, 0));
+  }
+
+  /**
+   * Indicates if a shutdownHook is registered or not.
+   *
+   * @param shutdownHook shutdownHook to check if registered.
+   * @return TRUE/FALSE depending on whether the shutdownHook is registered.
+   */
+  public boolean hasShutdownHook(Runnable shutdownHook) {
+    return hooks.contains(new HookEntry(shutdownHook, 0));
+  }
+
+}
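
A brief usage sketch of the new manager: hooks run at JVM exit, highest priority first. The priorities and messages below are made up; the API calls are the ones added above.

    import org.apache.hadoop.util.ShutdownHookManager;

    public class ShutdownHookSketch {
      public static void main(String[] args) {
        // Registered first but runs last: priority 0 is the lowest.
        ShutdownHookManager.get().addShutdownHook(new Runnable() {
          @Override
          public void run() {
            System.out.println("log the shutdown message");
          }
        }, 0);
        // Higher priority, so this hook runs before the one above.
        ShutdownHookManager.get().addShutdownHook(new Runnable() {
          @Override
          public void run() {
            System.out.println("close open files first");
          }
        }, 10);
        // Both hooks fire when the JVM exits after main() returns.
      }
    }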

+ 14 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java

@@ -46,6 +46,11 @@ import org.apache.hadoop.net.NetUtils;
 @InterfaceStability.Unstable
 public class StringUtils {

+  /**
+   * Priority of the StringUtils shutdown hook.
+   */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 0;
+
   private static final DecimalFormat decimalFormat;
   static {
           NumberFormat numberFormat = NumberFormat.getNumberInstance(Locale.ENGLISH);
@@ -600,12 +605,15 @@ public class StringUtils {
         )
       );

-    Runtime.getRuntime().addShutdownHook(new Thread() {
-      public void run() {
-        LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
-          "Shutting down " + classname + " at " + hostname}));
-      }
-    });
+    ShutdownHookManager.get().addShutdownHook(
+      new Runnable() {
+        @Override
+        public void run() {
+          LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
+            "Shutting down " + classname + " at " + hostname}));
+        }
+      }, SHUTDOWN_HOOK_PRIORITY);
+
   }

   /**

+ 22 - 0
hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem

@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.hadoop.fs.LocalFileSystem
+org.apache.hadoop.fs.viewfs.ViewFileSystem
+org.apache.hadoop.fs.s3.S3FileSystem
+org.apache.hadoop.fs.s3native.NativeS3FileSystem
+org.apache.hadoop.fs.kfs.KosmosFileSystem
+org.apache.hadoop.fs.ftp.FTPFileSystem
+org.apache.hadoop.fs.HarFileSystem

+ 0 - 70
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -352,25 +352,6 @@
   </description>
 </property>
 
-<property>
-  <name>fs.file.impl</name>
-  <value>org.apache.hadoop.fs.LocalFileSystem</value>
-  <description>The FileSystem for file: uris.</description>
-</property>
-
-<property>
-  <name>fs.hdfs.impl</name>
-  <value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
-  <description>The FileSystem for hdfs: uris.</description>
-</property>
-
-<property>
-  <name>fs.viewfs.impl</name>
-  <value>org.apache.hadoop.fs.viewfs.ViewFileSystem</value>
-  <description>The FileSystem for view file system for viewfs: uris
-  (ie client side mount table:).</description>
-</property>
-
 <property>
   <name>fs.AbstractFileSystem.file.impl</name>
   <value>org.apache.hadoop.fs.local.LocalFs</value>
@@ -391,45 +372,6 @@
   (ie client side mount table:).</description>
 </property>
 
-<property>
-  <name>fs.s3.impl</name>
-  <value>org.apache.hadoop.fs.s3.S3FileSystem</value>
-  <description>The FileSystem for s3: uris.</description>
-</property>
-
-<property>
-  <name>fs.s3n.impl</name>
-  <value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value>
-  <description>The FileSystem for s3n: (Native S3) uris.</description>
-</property>
-
-<property>
-  <name>fs.kfs.impl</name>
-  <value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value>
-  <description>The FileSystem for kfs: uris.</description>
-</property>
-
-<property>
-  <name>fs.hftp.impl</name>
-  <value>org.apache.hadoop.hdfs.HftpFileSystem</value>
-</property>
-
-<property>
-  <name>fs.hsftp.impl</name>
-  <value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
-</property>
-
-<property>
-  <name>fs.webhdfs.impl</name>
-  <value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value>
-</property>
-
-<property>
-  <name>fs.ftp.impl</name>
-  <value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>
-  <description>The FileSystem for ftp: uris.</description>
-</property>
-
 <property>
   <name>fs.ftp.host</name>
   <value>0.0.0.0</value>
@@ -444,18 +386,6 @@
   </description>
 </property>
 
-<property>
-  <name>fs.har.impl</name>
-  <value>org.apache.hadoop.fs.HarFileSystem</value>
-  <description>The filesystem for Hadoop archives. </description>
-</property>
-
-<property>
-  <name>fs.har.impl.disable.cache</name>
-  <value>true</value>
-  <description>Don't cache 'har' filesystem instances.</description>
-</property>
-
 <property>
   <name>fs.df.interval</name>
   <value>60000</value>

+ 22 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java

@@ -23,6 +23,7 @@ import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.io.StringWriter;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -671,6 +672,27 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  public void testSetSocketAddress() throws IOException {
+    Configuration conf = new Configuration();
+    NetUtils.addStaticResolution("host", "127.0.0.1");
+    final String defaultAddr = "host:1";
+    
+    InetSocketAddress addr = NetUtils.createSocketAddr(defaultAddr);    
+    conf.setSocketAddr("myAddress", addr);
+    assertEquals(defaultAddr, NetUtils.getHostPortString(addr));
+  }
+  
+  public void testUpdateSocketAddress() throws IOException {
+    InetSocketAddress addr = NetUtils.createSocketAddrForHost("host", 1);
+    InetSocketAddress connectAddr = conf.updateConnectAddr("myAddress", addr);
+    assertEquals(connectAddr.getHostName(), addr.getHostName());
+    
+    addr = new InetSocketAddress(1);
+    connectAddr = conf.updateConnectAddr("myAddress", addr);
+    assertEquals(connectAddr.getHostName(),
+                 InetAddress.getLocalHost().getHostName());
+  }
+
   public void testReload() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();

+ 176 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java

@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Test;
+
+public class TestAfsCheckPath {
+  
+  private static int DEFAULT_PORT = 1234;
+  private static int OTHER_PORT = 4321;
+  
+  @Test
+  public void testCheckPathWithNoPorts() throws URISyntaxException {
+    URI uri = new URI("dummy://dummy-host");
+    AbstractFileSystem afs = new DummyFileSystem(uri);
+    afs.checkPath(new Path("dummy://dummy-host"));
+  }
+  
+  @Test
+  public void testCheckPathWithDefaultPort() throws URISyntaxException {
+    URI uri = new URI("dummy://dummy-host:" + DEFAULT_PORT);
+    AbstractFileSystem afs = new DummyFileSystem(uri);
+    afs.checkPath(new Path("dummy://dummy-host:" + DEFAULT_PORT));
+  }
+  
+  @Test
+  public void testCheckPathWithTheSameNonDefaultPort()
+      throws URISyntaxException {
+    URI uri = new URI("dummy://dummy-host:" + OTHER_PORT);
+    AbstractFileSystem afs = new DummyFileSystem(uri);
+    afs.checkPath(new Path("dummy://dummy-host:" + OTHER_PORT));
+  }
+  
+  @Test(expected=InvalidPathException.class)
+  public void testCheckPathWithDifferentPorts() throws URISyntaxException {
+    URI uri = new URI("dummy://dummy-host:" + DEFAULT_PORT);
+    AbstractFileSystem afs = new DummyFileSystem(uri);
+    afs.checkPath(new Path("dummy://dummy-host:" + OTHER_PORT));
+  }
+  
+  private static class DummyFileSystem extends AbstractFileSystem {
+    
+    public DummyFileSystem(URI uri) throws URISyntaxException {
+      super(uri, "dummy", true, DEFAULT_PORT);
+    }
+    
+    @Override
+    public int getUriDefaultPort() {
+      return DEFAULT_PORT;
+    }
+
+    @Override
+    public FSDataOutputStream createInternal(Path f, EnumSet<CreateFlag> flag,
+        FsPermission absolutePermission, int bufferSize, short replication,
+        long blockSize, Progressable progress, int bytesPerChecksum,
+        boolean createParent) throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public boolean delete(Path f, boolean recursive)
+        throws AccessControlException, FileNotFoundException,
+        UnresolvedLinkException, IOException {
+      // deliberately empty
+      return false;
+    }
+
+    @Override
+    public BlockLocation[] getFileBlockLocations(Path f, long start, long len)
+        throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public FileChecksum getFileChecksum(Path f) throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public FileStatus getFileStatus(Path f) throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public FsStatus getFsStatus() throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public FsServerDefaults getServerDefaults() throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public FileStatus[] listStatus(Path f) throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public void mkdir(Path dir, FsPermission permission, boolean createParent)
+        throws IOException {
+      // deliberately empty
+    }
+
+    @Override
+    public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+      // deliberately empty
+      return null;
+    }
+
+    @Override
+    public void renameInternal(Path src, Path dst) throws IOException {
+      // deliberately empty
+    }
+
+    @Override
+    public void setOwner(Path f, String username, String groupname)
+        throws IOException {
+      // deliberately empty
+    }
+
+    @Override
+    public void setPermission(Path f, FsPermission permission)
+        throws IOException {
+      // deliberately empty
+    }
+
+    @Override
+    public boolean setReplication(Path f, short replication) throws IOException {
+      // deliberately empty
+      return false;
+    }
+
+    @Override
+    public void setTimes(Path f, long mtime, long atime) throws IOException {
+      // deliberately empty
+    }
+
+    @Override
+    public void setVerifyChecksum(boolean verifyChecksum) throws IOException {
+      // deliberately empty
+    }
+    
+  }
+}

+ 3 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.Set;
 
 import junit.framework.Assert;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -65,7 +66,7 @@ public class TestFileContextDeleteOnExit {
     checkDeleteOnExitData(1, fc, file1);
     
     // Ensure shutdown hook is added
-    Assert.assertTrue(Runtime.getRuntime().removeShutdownHook(FileContext.FINALIZER));
+    Assert.assertTrue(ShutdownHookManager.get().hasShutdownHook(FileContext.FINALIZER));
     
     Path file2 = getTestRootPath(fc, "dir1/file2");
     createFile(fc, file2, numBlocks, blockSize);
@@ -79,8 +80,7 @@ public class TestFileContextDeleteOnExit {
     
     
     // trigger deleteOnExit and ensure the registered
     // paths are cleaned up
-    FileContext.FINALIZER.start();
-    FileContext.FINALIZER.join();
+    FileContext.FINALIZER.run();
     checkDeleteOnExitData(0, fc, new Path[0]);
     Assert.assertFalse(exists(fc, file1));
     Assert.assertFalse(exists(fc, file2));

+ 7 - 7
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java

@@ -43,7 +43,7 @@ public class TestFileSystemCaching {
   @Test
   public void testCacheEnabled() throws Exception {
     Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     FileSystem fs1 = FileSystem.get(new URI("cachedfile://a"), conf);
     FileSystem fs2 = FileSystem.get(new URI("cachedfile://a"), conf);
     assertSame(fs1, fs2);
@@ -84,7 +84,7 @@ public class TestFileSystemCaching {
     // wait for InitializeForeverFileSystem to start initialization
     InitializeForeverFileSystem.sem.acquire();
     
-    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     FileSystem.get(new URI("cachedfile://a"), conf);
     t.interrupt();
     t.join();
@@ -93,7 +93,7 @@ public class TestFileSystemCaching {
   @Test
   public void testCacheDisabled() throws Exception {
     Configuration conf = new Configuration();
-    conf.set("fs.uncachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.uncachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     conf.setBoolean("fs.uncachedfile.impl.disable.cache", true);
     FileSystem fs1 = FileSystem.get(new URI("uncachedfile://a"), conf);
     FileSystem fs2 = FileSystem.get(new URI("uncachedfile://a"), conf);
@@ -104,7 +104,7 @@ public class TestFileSystemCaching {
   @Test
   public <T extends TokenIdentifier> void testCacheForUgi() throws Exception {
     final Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
     UserGroupInformation ugiB = UserGroupInformation.createRemoteUser("bar");
     FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
@@ -156,7 +156,7 @@ public class TestFileSystemCaching {
   @Test
   public void testUserFS() throws Exception {
     final Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     FileSystem fsU1 = FileSystem.get(new URI("cachedfile://a"), conf, "bar");
     FileSystem fsU2 = FileSystem.get(new URI("cachedfile://a"), conf, "foo");
     
@@ -166,7 +166,7 @@ public class TestFileSystemCaching {
   @Test
   public void testFsUniqueness() throws Exception {
     final Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     // multiple invocations of FileSystem.get return the same object.
     FileSystem fs1 = FileSystem.get(conf);
     FileSystem fs2 = FileSystem.get(conf);
@@ -183,7 +183,7 @@ public class TestFileSystemCaching {
   @Test
   public void testCloseAllForUGI() throws Exception {
     final Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
+    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
     UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
     FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
       public FileSystem run() throws Exception {

+ 4 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java

@@ -165,7 +165,10 @@ public class TestFilterFileSystem {
     public Token<?> getDelegationToken(String renewer) throws IOException {
       return null;
     }
-    
+
+    public String getScheme() {
+      return "dontcheck";
+    }
   }
   
   @Test

+ 13 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java

@@ -99,6 +99,19 @@ public class HttpServerFunctionalTest extends Assert {
     }
   }
 
+  /**
+   * Create an HttpServer instance on the given address for the given webapp
+   * @param host to bind
+   * @param port to bind
+   * @return the server
+   * @throws IOException if it could not be created
+   */
+  public static HttpServer createServer(String host, int port)
+      throws IOException {
+    prepareTestWebapp();
+    return new HttpServer(TEST, host, port, true);
+  }
+
   /**
    * Create an HttpServer instance for the given webapp
    * @param webapp the webapp to work with

+ 96 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.http;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
 import java.net.URL;
 import java.util.Arrays;
 import java.util.Enumeration;
@@ -35,6 +36,7 @@ import java.util.concurrent.Executors;
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
@@ -53,10 +55,12 @@ import org.apache.hadoop.http.HttpServer.QuotingInputFilter.RequestQuoter;
 import org.apache.hadoop.http.resource.JerseyResource;
 import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.Mock;
 import org.mockito.Mockito;
 import org.mortbay.util.ajax.JSON;
 
@@ -422,4 +426,96 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     assertEquals("bar", m.get(JerseyResource.OP));
     assertEquals("bar", m.get(JerseyResource.OP));
     LOG.info("END testJersey()");
     LOG.info("END testJersey()");
   }
   }
+
+  @Test
+  public void testHasAdministratorAccess() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false);
+    ServletContext context = Mockito.mock(ServletContext.class);
+    Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
+    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(null);
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(request.getRemoteUser()).thenReturn(null);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+
+    //authorization OFF
+    Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+
+    //authorization ON & user NULL
+    response = Mockito.mock(HttpServletResponse.class);
+    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
+    Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response));
+    Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString());
+
+    //authorization ON & user NOT NULL & ACLs NULL
+    response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(request.getRemoteUser()).thenReturn("foo");
+    Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+
+    //authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs
+    response = Mockito.mock(HttpServletResponse.class);
+    AccessControlList acls = Mockito.mock(AccessControlList.class);
+    Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
+    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+    Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response));
+    Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString());
+
+    //authorization ON & user NOT NULL & ACLs NOT NULL & user in in ACLs
+    response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(true);
+    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+    Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+
+  }
+
+  @Test public void testBindAddress() throws Exception {
+    checkBindAddress("0.0.0.0", 0, false).stop();
+    // hang onto this one for a bit more testing
+    HttpServer myServer = checkBindAddress("localhost", 0, false);
+    HttpServer myServer2 = null;
+    try { 
+      int port = myServer.getListenerAddress().getPort();
+      // it's already in use, true = expect a higher port
+      myServer2 = checkBindAddress("localhost", port, true);
+      // try to reuse the port
+      port = myServer2.getListenerAddress().getPort();
+      myServer2.stop();
+      assertEquals(-1, myServer2.getPort()); // not bound
+      myServer2.openListener();
+      assertEquals(port, myServer2.getPort()); // expect same port
+    } finally {
+      myServer.stop();
+      if (myServer2 != null) {
+        myServer2.stop();
+      }
+    }
+  }
+  
+  private HttpServer checkBindAddress(String host, int port, boolean findPort)
+      throws Exception {
+    HttpServer server = createServer(host, port);
+    try {
+      // not bound, ephemeral should return requested port (0 for ephemeral)
+      InetSocketAddress addr = server.getListenerAddress();
+      assertEquals(port, addr.getPort());
+      // verify hostname is what was given
+      server.openListener();
+      addr = server.getListenerAddress();
+      assertEquals(host, addr.getHostName());
+
+      int boundPort = addr.getPort();
+      if (port == 0) {
+        assertTrue(boundPort != 0); // ephemeral should now return bound port
+      } else if (findPort) {
+        assertTrue(boundPort > port);
+        // allow a little wiggle room to prevent random test failures if
+        // some consecutive ports are already in use
+        assertTrue(addr.getPort() - port < 8);
+      }
+    } catch (Exception e) {
+      server.stop();
+      throw e;
+    }
+    return server;
+  }
 }

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java

@@ -470,7 +470,7 @@ public class TestSequenceFile extends TestCase {
     SequenceFile.Writer writer = SequenceFile.createWriter(
         spyFs, conf, p, NullWritable.class, NullWritable.class);
     writer.close();
-    Mockito.verify(spyFs).getDefaultReplication();
+    Mockito.verify(spyFs).getDefaultReplication(p);
   }
 
   private static class TestFSDataInputStream extends FSDataInputStream {

+ 13 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java

@@ -169,6 +169,19 @@ public class TestNetUtils {
     assertInException(wrapped, "/UnknownHost");
     assertInException(wrapped, "/UnknownHost");
   }
   }
   
   
+  @Test
+  public void testGetConnectAddress() throws IOException {
+    NetUtils.addStaticResolution("host", "127.0.0.1");
+    InetSocketAddress addr = NetUtils.createSocketAddrForHost("host", 1);
+    InetSocketAddress connectAddr = NetUtils.getConnectAddress(addr);
+    assertEquals(addr.getHostName(), connectAddr.getHostName());
+    
+    addr = new InetSocketAddress(1);
+    connectAddr = NetUtils.getConnectAddress(addr);
+    assertEquals(InetAddress.getLocalHost().getHostName(),
+                 connectAddr.getHostName());
+  }
+
   @Test
   public void testCreateSocketAddress() throws Throwable {
     InetSocketAddress addr = NetUtils.createSocketAddr(

+ 62 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java

@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestShutdownHookManager {
+
+  @Test
+  public void shutdownHookManager() {
+    ShutdownHookManager mgr = ShutdownHookManager.get();
+    Assert.assertNotNull(mgr);
+    Assert.assertEquals(0, mgr.getShutdownHooksInOrder().size());
+    Runnable hook1 = new Runnable() {
+      @Override
+      public void run() {
+      }
+    };
+    Runnable hook2 = new Runnable() {
+      @Override
+      public void run() {
+      }
+    };
+
+    mgr.addShutdownHook(hook1, 0);
+    Assert.assertTrue(mgr.hasShutdownHook(hook1));
+    Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size());
+    Assert.assertEquals(hook1, mgr.getShutdownHooksInOrder().get(0));
+    mgr.removeShutdownHook(hook1);
+    Assert.assertFalse(mgr.hasShutdownHook(hook1));
+
+    mgr.addShutdownHook(hook1, 0);
+    Assert.assertTrue(mgr.hasShutdownHook(hook1));
+    Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size());
+    Assert.assertTrue(mgr.hasShutdownHook(hook1));
+    Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size());
+
+    mgr.addShutdownHook(hook2, 1);
+    Assert.assertTrue(mgr.hasShutdownHook(hook1));
+    Assert.assertTrue(mgr.hasShutdownHook(hook2));
+    Assert.assertEquals(2, mgr.getShutdownHooksInOrder().size());
+    Assert.assertEquals(hook2, mgr.getShutdownHooksInOrder().get(0));
+    Assert.assertEquals(hook1, mgr.getShutdownHooksInOrder().get(1));
+
+  }
+}

+ 3 - 5
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java

@@ -43,8 +43,8 @@ import java.util.Map;
 public class FSOperations {
 
   /**
-   * Converts a Unix permission octal & symbolic representation
-   * (i.e. 655 or -rwxr--r--) into a FileSystemAccess permission.
+   * Converts a Unix permission octal
+   * (i.e. 655 or 1777) into a FileSystemAccess permission.
    *
    * @param str Unix permission symbolic representation.
    *
@@ -55,10 +55,8 @@ public class FSOperations {
     FsPermission permission;
     if (str.equals(HttpFSFileSystem.DEFAULT_PERMISSION)) {
       permission = FsPermission.getDefault();
-    } else if (str.length() == 3) {
-      permission = new FsPermission(Short.parseShort(str, 8));
     } else {
-      permission = FsPermission.valueOf(str);
+      permission = new FsPermission(Short.parseShort(str, 8));
     }
     return permission;
   }
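
With the symbolic form dropped, the converter accepts plain 3- or 4-digit octal strings, the optional leading digit carrying the sticky bit. A small, hypothetical sketch of the same parse outside HttpFS:

  import org.apache.hadoop.fs.permission.FsPermission;

  public class OctalPermissionExample {
    public static void main(String[] args) {
      // "1777" carries the sticky bit plus rwxrwxrwx; "655" is rw-r-xr-x.
      FsPermission sticky = new FsPermission(Short.parseShort("1777", 8));
      FsPermission plain = new FsPermission(Short.parseShort("655", 8));
      System.out.println(sticky);
      System.out.println(plain);
    }
  }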

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java

@@ -446,7 +446,7 @@ public class HttpFSParams {
      * Symbolic Unix permissions regular expression pattern.
      */
     private static final Pattern PERMISSION_PATTERN =
-      Pattern.compile(DEFAULT + "|(-[-r][-w][-x][-r][-w][-x][-r][-w][-x])" + "|[0-7][0-7][0-7]");
+      Pattern.compile(DEFAULT + "|[0-1]?[0-7][0-7][0-7]");
 
     /**
      * Constructor.

+ 15 - 5
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java

@@ -310,11 +310,8 @@ public class TestHttpFSFileSystem extends HFSTestCase {
 
 
   private void testSetPermission() throws Exception {
     FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
-    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
-    OutputStream os = fs.create(path);
-    os.write(1);
-    os.close();
-    fs.close();
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foodir");
+    fs.mkdirs(path);
 
     fs = getHttpFileSystem();
     FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
@@ -326,6 +323,19 @@ public class TestHttpFSFileSystem extends HFSTestCase {
     fs.close();
     FsPermission permission2 = status1.getPermission();
     Assert.assertEquals(permission2, permission1);
+
+    //sticky bit 
+    fs = getHttpFileSystem();
+    permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE, true);
+    fs.setPermission(path, permission1);
+    fs.close();
+
+    fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    status1 = fs.getFileStatus(path);
+    fs.close();
+    permission2 = status1.getPermission();
+    Assert.assertTrue(permission2.getStickyBit());
+    Assert.assertEquals(permission2, permission1);
   }
   }
 
   private void testSetOwner() throws Exception {
+ 43 - 4
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -65,12 +65,17 @@ Trunk (unreleased changes)
     HDFS-3273. Refactor BackupImage and FSEditLog, and rename
     JournalListener.rollLogs(..) to startLogSegment(..).  (szetszwo)
 
-    HDFS-3292. Remove the deprecated DiskStatus, getDiskStatus(), getRawCapacity() and
-    getRawUsed() from DistributedFileSystem.  (Arpit Gupta via szetszwo)
+    HDFS-3292. Remove the deprecated DiskStatus, getDiskStatus(), getRawUsed()
+    and getRawCapacity() from DistributedFileSystem.  (Arpit Gupta via szetszwo)
 
-    HDFS-3282. Expose getFileLength API. (umamahesh)
+    HADOOP-8285. HDFS changes for Use ProtoBuf for RpcPayLoadHeader. (sanjay
+    radia)
 
-    HADOOP-8285 HDFS changes for Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
+    HDFS-2743. Streamline usage of bookkeeper journal manager. 
+    (Ivan Kelly via umamahesh)
+
+    HDFS-3293. Add toString(), equals(..) and hashCode() to JournalInfo.
+    (Hari Mankude via szetszwo)
 
   OPTIMIZATIONS
 
@@ -134,6 +139,8 @@ Trunk (unreleased changes)
     (Henry Robinson via atm)
 
     HDFS-3243. TestParallelRead timing out on jenkins. (Henry Robinson via todd)
+
+    HDFS-3265. PowerPc Build error. (Kumar Ravi via mattf)
     
 Release 2.0.0 - UNRELEASED 
 
@@ -214,6 +221,10 @@ Release 2.0.0 - UNRELEASED
 
 
     HDFS-3004. Implement Recovery Mode. (Colin Patrick McCabe via eli)
 
+    HDFS-3282. Add HdfsDataInputStream as a public API. (umamahesh)
+
+    HDFS-3298. Add HdfsDataOutputStream as a public API.  (szetszwo)
+
   IMPROVEMENTS
 
     HDFS-2018. Move all journal stream management code into one place.
@@ -400,6 +411,12 @@ Release 2.0.0 - UNRELEASED
     HDFS-3169. TestFsck should test multiple -move operations in a row.
     (Colin Patrick McCabe via eli)
 
+    HDFS-3258. Test for HADOOP-8144 (pseudoSortByDistance in
+    NetworkTopology for first rack local node). (Junping Du via eli)
+
+    HDFS-3322. Use HdfsDataInputStream and HdfsDataOutputStream in Hdfs.
+    (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -553,6 +570,17 @@ Release 2.0.0 - UNRELEASED
 
 
     HDFS-3314. HttpFS operation for getHomeDirectory is incorrect. (tucu)
 
+    HDFS-3319. Change DFSOutputStream to not to start a thread in constructors.
+    (szetszwo)
+
+    HDFS-3181. Fix a test case in TestLeaseRecovery2.  (szetszwo)
+
+    HDFS-3309. HttpFS (Hoop) chmod not supporting octal and sticky bit 
+    permissions. (tucu)
+
+    HDFS-3326. Append enabled log message uses the wrong variable.
+    (Matthew Jacobs via eli)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -895,6 +923,17 @@ Release 0.23.3 - UNRELEASED
     HDFS-3312. In HftpFileSystem, the namenode URI is non-secure but the
     delegation tokens have to use secure URI.  (Daryn Sharp via szetszwo)
 
+    HDFS-3318. Use BoundedInputStream in ByteRangeInputStream, otherwise, it
+    hangs on transfers >2 GB.  (Daryn Sharp via szetszwo)
+
+    HDFS-3321. Fix safe mode turn off tip message.  (Ravi Prakash via szetszwo)
+
+    HDFS-3334. Fix ByteRangeInputStream stream leakage.  (Daryn Sharp via
+    szetszwo)
+
+    HDFS-3331. In namenode, check superuser privilege for setBalancerBandwidth
+    and acquire the write lock for finalizeUpgrade.  (szetszwo)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 13 - 7
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/README.txt

@@ -12,19 +12,25 @@ How do I build?
  To generate the distribution packages for BK journal, do the
  following.
 
-   $ mvn clean install -Pdist -Dtar
+   $ mvn clean package -Pdist
 
- This will generate a tarball, 
- target/hadoop-hdfs-bkjournal-<VERSION>.tar.gz 
+ This will generate a jar with all the dependencies needed by the journal
+ manager, 
+
+ target/hadoop-hdfs-bkjournal-<VERSION>.jar
+
+ Note that the -Pdist part of the build command is important, as otherwise
+ the dependencies would not be packaged in the jar. 
 
 -------------------------------------------------------------------------------
 How do I use the BookKeeper Journal?
 
- To run a HDFS namenode using BookKeeper as a backend, extract the
- distribution package on top of hdfs
+ To run a HDFS namenode using BookKeeper as a backend, copy the bkjournal
+ jar, generated above, into the lib directory of hdfs. In the standard 
+ distribution of HDFS, this is at $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
 
-   cd hadoop-hdfs-<VERSION>/
-   tar --strip-components 1 -zxvf path/to/hadoop-hdfs-bkjournal-<VERSION>.tar.gz
+  cp target/hadoop-hdfs-bkjournal-<VERSION>.jar \
+    $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
 
  Then, in hdfs-site.xml, set the following properties.
 

+ 46 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml

@@ -65,4 +65,50 @@
       <scope>test</scope>
     </dependency>
   </dependencies>
+  <profiles>
+    <profile>
+      <id>dist</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-shade-plugin</artifactId>
+            <version>1.5</version>
+            <executions>
+              <execution>
+                <phase>package</phase>
+                <goals>
+                  <goal>shade</goal>
+                </goals>
+                <configuration>
+                  <createDependencyReducedPom>false</createDependencyReducedPom>
+                  <artifactSet>
+                    <includes>
+                      <include>org.apache.bookkeeper:bookkeeper-server</include>
+                      <include>org.apache.zookeeper:zookeeper</include>
+                      <include>org.jboss.netty:netty</include>
+                    </includes>
+                  </artifactSet>
+                <relocations>
+                  <relocation>
+                    <pattern>org.apache.bookkeeper</pattern>
+                    <shadedPattern>hidden.bkjournal.org.apache.bookkeeper</shadedPattern>
+                  </relocation>
+                  <relocation>
+                    <pattern>org.apache.zookeeper</pattern>
+                    <shadedPattern>hidden.bkjournal.org.apache.zookeeper</shadedPattern>
+                  </relocation>
+                  <relocation>
+                    <pattern>org.jboss.netty</pattern>
+                    <shadedPattern>hidden.bkjournal.org.jboss.netty</shadedPattern>
+                  </relocation>
+                </relocations>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
 </project>

+ 7 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java

@@ -35,6 +35,8 @@ import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -43,8 +45,8 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.util.Progressable;
 
@@ -88,11 +90,11 @@ public class Hdfs extends AbstractFileSystem {
   }
 
   @Override
-  public FSDataOutputStream createInternal(Path f,
+  public HdfsDataOutputStream createInternal(Path f,
       EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
       int bufferSize, short replication, long blockSize, Progressable progress,
       int bytesPerChecksum, boolean createParent) throws IOException {
-    return new FSDataOutputStream(dfs.primitiveCreate(getUriPath(f),
+    return new HdfsDataOutputStream(dfs.primitiveCreate(getUriPath(f),
         absolutePermission, createFlag, createParent, replication, blockSize,
         progress, bufferSize, bytesPerChecksum), getStatistics());
   }
@@ -324,8 +326,9 @@ public class Hdfs extends AbstractFileSystem {
     dfs.mkdirs(getUriPath(dir), permission, createParent);
   }
 
+  @SuppressWarnings("deprecation")
   @Override
-  public FSDataInputStream open(Path f, int bufferSize) 
+  public HdfsDataInputStream open(Path f, int bufferSize) 
       throws IOException, UnresolvedLinkException {
     return new DFSClient.DFSDataInputStream(dfs.open(getUriPath(f),
         bufferSize, verifyChecksum));

+ 71 - 31
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java

@@ -23,9 +23,12 @@ import java.io.InputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
 
+import org.apache.commons.io.input.BoundedInputStream;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * To support HTTP byte streams, a new connection to an HTTP server needs to be
  * created each time. This class hides the complexity of those multiple 
@@ -60,7 +63,7 @@ public abstract class ByteRangeInputStream extends FSInputStream {
   }
 
   enum StreamStatus {
-    NORMAL, SEEK
+    NORMAL, SEEK, CLOSED
   }
   protected InputStream in;
   protected URLOpener originalURL;
@@ -88,66 +91,93 @@ public abstract class ByteRangeInputStream extends FSInputStream {
   protected abstract URL getResolvedUrl(final HttpURLConnection connection
       ) throws IOException;
 
-  private InputStream getInputStream() throws IOException {
-    if (status != StreamStatus.NORMAL) {
-      
-      if (in != null) {
-        in.close();
-        in = null;
-      }
-      
-      // Use the original url if no resolved url exists, eg. if
-      // it's the first time a request is made.
-      final URLOpener opener =
-        (resolvedURL.getURL() == null) ? originalURL : resolvedURL;
-
-      final HttpURLConnection connection = opener.openConnection(startPos);
-      connection.connect();
-      checkResponseCode(connection);
-
-      final String cl = connection.getHeaderField(StreamFile.CONTENT_LENGTH);
-      filelength = (cl == null) ? -1 : Long.parseLong(cl);
-      in = connection.getInputStream();
-
-      resolvedURL.setURL(getResolvedUrl(connection));
-      status = StreamStatus.NORMAL;
+  @VisibleForTesting
+  protected InputStream getInputStream() throws IOException {
+    switch (status) {
+      case NORMAL:
+        break;
+      case SEEK:
+        if (in != null) {
+          in.close();
+        }
+        in = openInputStream();
+        status = StreamStatus.NORMAL;
+        break;
+      case CLOSED:
+        throw new IOException("Stream closed");
     }
-    
     return in;
   }
   
-  private void update(final boolean isEOF, final int n)
-      throws IOException {
-    if (!isEOF) {
+  @VisibleForTesting
+  protected InputStream openInputStream() throws IOException {
+    // Use the original url if no resolved url exists, eg. if
+    // it's the first time a request is made.
+    final URLOpener opener =
+      (resolvedURL.getURL() == null) ? originalURL : resolvedURL;
+
+    final HttpURLConnection connection = opener.openConnection(startPos);
+    connection.connect();
+    checkResponseCode(connection);
+
+    final String cl = connection.getHeaderField(StreamFile.CONTENT_LENGTH);
+    if (cl == null) {
+      throw new IOException(StreamFile.CONTENT_LENGTH+" header is missing");
+    }
+    final long streamlength = Long.parseLong(cl);
+    filelength = startPos + streamlength;
+    // Java has a bug with >2GB request streams.  It won't bounds check
+    // the reads so the transfer blocks until the server times out
+    InputStream is =
+        new BoundedInputStream(connection.getInputStream(), streamlength);
+
+    resolvedURL.setURL(getResolvedUrl(connection));
+    
+    return is;
+  }
+  
+  private int update(final int n) throws IOException {
+    if (n != -1) {
       currentPos += n;
     } else if (currentPos < filelength) {
       throw new IOException("Got EOF but currentPos = " + currentPos
           + " < filelength = " + filelength);
     }
+    return n;
   }
 
+  @Override
   public int read() throws IOException {
     final int b = getInputStream().read();
-    update(b == -1, 1);
+    update((b == -1) ? -1 : 1);
     return b;
   }
+
+  @Override
+  public int read(byte b[], int off, int len) throws IOException {
+    return update(getInputStream().read(b, off, len));
+  }
   
   /**
    * Seek to the given offset from the start of the file.
    * The next read() will be from that location.  Can't
    * seek past the end of the file.
    */
+  @Override
   public void seek(long pos) throws IOException {
     if (pos != currentPos) {
       startPos = pos;
       currentPos = pos;
-      status = StreamStatus.SEEK;
+      if (status != StreamStatus.CLOSED) {
+        status = StreamStatus.SEEK;
+      }
     }
   }
 
   /**
    * Return the current offset from the start of the file
    */
+  @Override
   public long getPos() throws IOException {
     return currentPos;
   }
@@ -156,7 +186,17 @@ public abstract class ByteRangeInputStream extends FSInputStream {
    * Seeks a different copy of the data.  Returns true if
    * found a new source, false otherwise.
    */
+  @Override
   public boolean seekToNewSource(long targetPos) throws IOException {
     return false;
   }
-}
+  
+  @Override
+  public void close() throws IOException {
+    if (in != null) {
+      in.close();
+      in = null;
+    }
+    status = StreamStatus.CLOSED;
+  }
+}
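
For reference, a minimal sketch (with made-up sizes) of how the commons-io BoundedInputStream used in openInputStream() above caps a stream at the advertised content length, so reads see EOF instead of blocking past it:

  import java.io.ByteArrayInputStream;
  import java.io.InputStream;
  import org.apache.commons.io.input.BoundedInputStream;

  public class BoundedReadExample {
    public static void main(String[] args) throws Exception {
      InputStream raw = new ByteArrayInputStream(new byte[16]);
      // Cap the stream at 10 bytes; further reads return -1.
      InputStream bounded = new BoundedInputStream(raw, 10);
      int n = 0;
      while (bounded.read() != -1) {
        n++;
      }
      System.out.println(n);  // 10
    }
  }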

+ 11 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -78,7 +78,6 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
@@ -91,6 +90,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -996,7 +996,7 @@ public class DFSClient implements java.io.Closeable {
    * Call {@link #create(String, FsPermission, EnumSet, boolean, short, 
    * long, Progressable, int)} with <code>createParent</code> set to true.
    */
-  public OutputStream create(String src, 
+  public DFSOutputStream create(String src, 
                              FsPermission permission,
                              EnumSet<CreateFlag> flag, 
                              short replication,
@@ -1029,7 +1029,7 @@ public class DFSClient implements java.io.Closeable {
    * @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable,
    * boolean, short, long) for detailed description of exceptions thrown
    */
-  public OutputStream create(String src, 
+  public DFSOutputStream create(String src, 
                              FsPermission permission,
                              EnumSet<CreateFlag> flag, 
                              boolean createParent,
@@ -1046,9 +1046,9 @@ public class DFSClient implements java.io.Closeable {
     if(LOG.isDebugEnabled()) {
       LOG.debug(src + ": masked=" + masked);
     }
-    final DFSOutputStream result = new DFSOutputStream(this, src, masked, flag,
-        createParent, replication, blockSize, progress, buffersize,
-        dfsClientConf.createChecksum());
+    final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
+        src, masked, flag, createParent, replication, blockSize, progress,
+        buffersize, dfsClientConf.createChecksum());
     leaserenewer.put(src, result, this);
     return result;
   }
@@ -1078,7 +1078,7 @@ public class DFSClient implements java.io.Closeable {
    *  Progressable, int)} except that the permission
    *  is absolute (ie has already been masked with umask.
    */
-  public OutputStream primitiveCreate(String src, 
+  public DFSOutputStream primitiveCreate(String src, 
                              FsPermission absPermission,
                              EnumSet<CreateFlag> flag,
                              boolean createParent,
@@ -1095,7 +1095,7 @@ public class DFSClient implements java.io.Closeable {
       DataChecksum checksum = DataChecksum.newDataChecksum(
       DataChecksum checksum = DataChecksum.newDataChecksum(
           dfsClientConf.checksumType,
           dfsClientConf.checksumType,
           bytesPerChecksum);
           bytesPerChecksum);
-      result = new DFSOutputStream(this, src, absPermission,
+      result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
           flag, createParent, replication, blockSize, progress, buffersize,
           flag, createParent, replication, blockSize, progress, buffersize,
           checksum);
           checksum);
     }
     }
@@ -1154,7 +1154,7 @@ public class DFSClient implements java.io.Closeable {
                                      UnsupportedOperationException.class,
                                      UnsupportedOperationException.class,
                                      UnresolvedPathException.class);
                                      UnresolvedPathException.class);
     }
     }
-    return new DFSOutputStream(this, src, buffersize, progress,
+    return DFSOutputStream.newStreamForAppend(this, src, buffersize, progress,
         lastBlock, stat, dfsClientConf.createChecksum());
         lastBlock, stat, dfsClientConf.createChecksum());
   }
   }
   
   
@@ -1169,11 +1169,11 @@ public class DFSClient implements java.io.Closeable {
    * 
    * 
    * @see ClientProtocol#append(String, String) 
    * @see ClientProtocol#append(String, String) 
    */
    */
-  public FSDataOutputStream append(final String src, final int buffersize,
+  public HdfsDataOutputStream append(final String src, final int buffersize,
       final Progressable progress, final FileSystem.Statistics statistics
       final Progressable progress, final FileSystem.Statistics statistics
       ) throws IOException {
       ) throws IOException {
     final DFSOutputStream out = append(src, buffersize, progress);
     final DFSOutputStream out = append(src, buffersize, progress);
-    return new FSDataOutputStream(out, statistics, out.getInitialLen());
+    return new HdfsDataOutputStream(out, statistics, out.getInitialLen());
   }
   }
 
 
   private DFSOutputStream append(String src, int buffersize, Progressable progress) 
   private DFSOutputStream append(String src, int buffersize, Progressable progress) 

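Editor's note: narrowing the declared return types above (OutputStream to DFSOutputStream, FSDataOutputStream to HdfsDataOutputStream) is source-compatible for existing callers, because assignments to the old, wider type still accept the narrower one. A minimal self-contained sketch of the idea; the class names here are illustrative, not from this patch:

    // Covariant narrowing: callers written against the wide type keep compiling.
    class WideReturn {
      Object make() { return new Object(); }
    }
    class NarrowReturn extends WideReturn {
      @Override
      String make() { return "narrowed"; }   // covariant override, legal since Java 5
    }
    class Caller {
      void use(WideReturn w) {
        Object o = w.make();                 // old call sites are unaffected
      }
    }
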
+ 39 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -118,6 +118,39 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
    * Grab the open-file info from namenode
    */
   synchronized void openInfo() throws IOException, UnresolvedLinkException {
+    lastBlockBeingWrittenLength = fetchLocatedBlocksAndGetLastBlockLength();
+    int retriesForLastBlockLength = 3;
+    while (retriesForLastBlockLength > 0) {
+      // Getting last block length as -1 is a special case. When cluster
+      // restarts, DNs may not report immediately. At this time partial block
+      // locations will not be available with NN for getting the length. Lets
+      // retry for 3 times to get the length.
+      if (lastBlockBeingWrittenLength == -1) {
+        DFSClient.LOG.warn("Last block locations not available. "
+            + "Datanodes might not have reported blocks completely."
+            + " Will retry for " + retriesForLastBlockLength + " times");
+        waitFor(4000);
+        lastBlockBeingWrittenLength = fetchLocatedBlocksAndGetLastBlockLength();
+      } else {
+        break;
+      }
+      retriesForLastBlockLength--;
+    }
+    if (retriesForLastBlockLength == 0) {
+      throw new IOException("Could not obtain the last block locations.");
+    }
+  }
+
+  private void waitFor(int waitTime) throws IOException {
+    try {
+      Thread.sleep(waitTime);
+    } catch (InterruptedException e) {
+      throw new IOException(
+          "Interrupted while getting the last block length.");
+    }
+  }
+
+  private long fetchLocatedBlocksAndGetLastBlockLength() throws IOException {
     LocatedBlocks newInfo = DFSClient.callGetBlockLocations(dfsClient.namenode, src, 0, prefetchSize);
     if (DFSClient.LOG.isDebugEnabled()) {
       DFSClient.LOG.debug("newInfo = " + newInfo);
@@ -136,10 +169,13 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
       }
     }
     locatedBlocks = newInfo;
-    lastBlockBeingWrittenLength = 0;
+    long lastBlockBeingWrittenLength = 0;
     if (!locatedBlocks.isLastBlockComplete()) {
       final LocatedBlock last = locatedBlocks.getLastLocatedBlock();
       if (last != null) {
+        if (last.getLocations().length == 0) {
+          return -1;
+        }
         final long len = readBlockLength(last);
         last.getBlock().setNumBytes(len);
         lastBlockBeingWrittenLength = len; 
@@ -147,13 +183,12 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
     }
 
     currentNode = null;
+    return lastBlockBeingWrittenLength;
   }
 
   /** Read the block length from one of the datanodes. */
   private long readBlockLength(LocatedBlock locatedblock) throws IOException {
-    if (locatedblock == null || locatedblock.getLocations().length == 0) {
-      return 0;
-    }
+    assert locatedblock != null : "LocatedBlock cannot be null";
     int replicaNotFoundCount = locatedblock.getLocations().length;
     
     for(DatanodeInfo datanode : locatedblock.getLocations()) {

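Editor's note: the new openInfo() logic above is a bounded retry. When the last block's length comes back as -1 because no DataNode locations have been reported yet, the client waits 4 seconds and tries again, up to 3 extra attempts, then fails. A standalone sketch of that pattern, slightly simplified to key the final check on the fetched value rather than the retry counter; the fetch callable and constants are stand-ins, not the HDFS code:

    import java.io.IOException;
    import java.util.concurrent.Callable;

    class BoundedRetrySketch {
      // Retries a fetch that uses -1 as a "not available yet" sentinel value.
      static long fetchWithRetry(Callable<Long> fetch) throws Exception {
        long result = fetch.call();
        for (int retriesLeft = 3; retriesLeft > 0 && result == -1; retriesLeft--) {
          Thread.sleep(4000);        // same pause as the patch
          result = fetch.call();
        }
        if (result == -1) {
          throw new IOException("Could not obtain the last block locations.");
        }
        return result;
      }
    }
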
+ 41 - 23
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -44,7 +44,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -99,7 +99,7 @@ import org.apache.hadoop.util.Progressable;
  * starts sending packets from the dataQueue.
 ****************************************************************/
 @InterfaceAudience.Private
-class DFSOutputStream extends FSOutputSummer implements Syncable {
+public class DFSOutputStream extends FSOutputSummer implements Syncable {
   private final DFSClient dfsClient;
   private static final int MAX_PACKETS = 80; // each packet 64K, total 5MB
   private Socket s;
@@ -1233,14 +1233,11 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
     this.checksum = checksum;
   }
 
-  /**
-   * Create a new output stream to the given DataNode.
-   * @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable, boolean, short, long)
-   */
-  DFSOutputStream(DFSClient dfsClient, String src, FsPermission masked, EnumSet<CreateFlag> flag,
-      boolean createParent, short replication, long blockSize, Progressable progress,
-      int buffersize, DataChecksum checksum) 
-      throws IOException {
+  /** Construct a new output stream for creating a file. */
+  private DFSOutputStream(DFSClient dfsClient, String src, FsPermission masked,
+      EnumSet<CreateFlag> flag, boolean createParent, short replication,
+      long blockSize, Progressable progress, int buffersize,
+      DataChecksum checksum) throws IOException {
     this(dfsClient, src, blockSize, progress, checksum, replication);
 
     computePacketChunkSize(dfsClient.getConf().writePacketSize,
@@ -1260,14 +1257,21 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
                                      UnresolvedPathException.class);
     }
     streamer = new DataStreamer();
-    streamer.start();
   }
 
-  /**
-   * Create a new output stream to the given DataNode.
-   * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long)
-   */
-  DFSOutputStream(DFSClient dfsClient, String src, int buffersize, Progressable progress,
+  static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
+      FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
+      short replication, long blockSize, Progressable progress, int buffersize,
+      DataChecksum checksum) throws IOException {
+    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, masked,
+        flag, createParent, replication, blockSize, progress, buffersize,
+        checksum);
+    out.streamer.start();
+    return out;
+  }
+
+  /** Construct a new output stream for append. */
+  private DFSOutputStream(DFSClient dfsClient, String src, int buffersize, Progressable progress,
       LocatedBlock lastBlock, HdfsFileStatus stat,
       DataChecksum checksum) throws IOException {
     this(dfsClient, src, stat.getBlockSize(), progress, checksum, stat.getReplication());
@@ -1285,7 +1289,15 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
           checksum.getBytesPerChecksum());
       streamer = new DataStreamer();
     }
-    streamer.start();
+  }
+
+  static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
+      int buffersize, Progressable progress, LocatedBlock lastBlock,
+      HdfsFileStatus stat, DataChecksum checksum) throws IOException {
+    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, buffersize,
+        progress, lastBlock, stat, checksum);
+    out.streamer.start();
+    return out;
   }
   }
 
   private void computePacketChunkSize(int psize, int csize) {
   }
   }
 
   /**
-   * from the designated replication factor of the file because the NameNode
-   * does not replicate the block to which a client is currently writing to.
-   * The client continues to write to a block even if a few datanodes in the
-   * write pipeline have failed. 
-   * @return the number of valid replicas of the current block
+   * @deprecated use {@link HdfsDataOutputStream#getCurrentBlockReplication()}.
    */
    */
   public synchronized int getNumCurrentReplicas() throws IOException {
   public synchronized int getNumCurrentReplicas() throws IOException {
+  }
+
+  /**
+   * Note that this is not a public API;
+   * use {@link HdfsDataOutputStream#getCurrentBlockReplication()} instead.
+   * 
+   * @return the number of valid replicas of the current block
+   */
+  public synchronized int getCurrentBlockReplication() throws IOException {
     dfsClient.checkOpen();
     isClosed();
     if (streamer == null) {

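Editor's note: the constructors above stop calling streamer.start() themselves; the new newStreamForCreate/newStreamForAppend factories start the streamer only after construction has finished, so the DataStreamer thread can never observe a partially constructed stream. A generic sketch of that factory pattern; the names are illustrative:

    // The worker thread is started by the factory method, never by the constructor.
    class StreamingWriter {
      private final Thread worker;

      private StreamingWriter() {
        worker = new Thread(() -> { /* drain a queue, send packets */ });
        // intentionally no worker.start() here
      }

      static StreamingWriter newWriter() {
        StreamingWriter w = new StreamingWriter();
        w.worker.start();   // safe: w is fully constructed at this point
        return w;
      }
    }
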
+ 29 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
@@ -45,8 +44,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -89,6 +88,17 @@ public class DistributedFileSystem extends FileSystem {
   public DistributedFileSystem() {
   }
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>hdfs</code>
+   */
+  @Override
+  public String getScheme() {
+    return "hdfs";
+  }
+
   @Deprecated
   public DistributedFileSystem(InetSocketAddress namenode,
     Configuration conf) throws IOException {
@@ -205,31 +215,33 @@ public class DistributedFileSystem extends FileSystem {
 
   /** This optional operation is not yet supported. */
   @Override
-  public FSDataOutputStream append(Path f, int bufferSize,
+  public HdfsDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
     statistics.incrementWriteOps(1);
     return dfs.append(getPathName(f), bufferSize, progress, statistics);
   }
 
   @Override
-  public FSDataOutputStream create(Path f, FsPermission permission,
+  public HdfsDataOutputStream create(Path f, FsPermission permission,
     boolean overwrite, int bufferSize, short replication, long blockSize,
     Progressable progress) throws IOException {
     statistics.incrementWriteOps(1);
-    return new FSDataOutputStream(dfs.create(getPathName(f), permission,
-        overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
-            : EnumSet.of(CreateFlag.CREATE), replication, blockSize, progress,
-        bufferSize), statistics);
+    final EnumSet<CreateFlag> cflags = overwrite?
+        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
+        : EnumSet.of(CreateFlag.CREATE);
+    final DFSOutputStream out = dfs.create(getPathName(f), permission, cflags,
+        replication, blockSize, progress, bufferSize);
+    return new HdfsDataOutputStream(out, statistics);
   }
   
   @SuppressWarnings("deprecation")
   @Override
-  protected FSDataOutputStream primitiveCreate(Path f,
+  protected HdfsDataOutputStream primitiveCreate(Path f,
     FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
     short replication, long blockSize, Progressable progress,
     int bytesPerChecksum) throws IOException {
     statistics.incrementReadOps(1);
-    return new FSDataOutputStream(dfs.primitiveCreate(getPathName(f),
+    return new HdfsDataOutputStream(dfs.primitiveCreate(getPathName(f),
         absolutePermission, flag, true, replication, blockSize,
         progress, bufferSize, bytesPerChecksum),statistics);
    } 
@@ -237,14 +249,14 @@ public class DistributedFileSystem extends FileSystem {
   /**
    * Same as create(), except fails if parent directory doesn't already exist.
    */
-  public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
+  public HdfsDataOutputStream createNonRecursive(Path f, FsPermission permission,
       EnumSet<CreateFlag> flag, int bufferSize, short replication,
       long blockSize, Progressable progress) throws IOException {
     statistics.incrementWriteOps(1);
     if (flag.contains(CreateFlag.OVERWRITE)) {
       flag.add(CreateFlag.CREATE);
     }
-    return new FSDataOutputStream(dfs.create(getPathName(f), permission, flag,
+    return new HdfsDataOutputStream(dfs.create(getPathName(f), permission, flag,
         false, replication, blockSize, progress, bufferSize), statistics);
   }
 
@@ -625,19 +637,18 @@ public class DistributedFileSystem extends FileSystem {
   // We do not see a need for user to report block checksum errors and do not  
   // want to rely on user to report block corruptions.
   @Deprecated
-  @SuppressWarnings("deprecation")
   public boolean reportChecksumFailure(Path f, 
     FSDataInputStream in, long inPos, 
     FSDataInputStream sums, long sumsPos) {
     
-    if(!(in instanceof DFSDataInputStream && sums instanceof DFSDataInputStream))
-      throw new IllegalArgumentException("Input streams must be types " +
-                                         "of DFSDataInputStream");
+    if(!(in instanceof HdfsDataInputStream && sums instanceof HdfsDataInputStream))
+      throw new IllegalArgumentException(
+          "Input streams must be types of HdfsDataInputStream");
     
     LocatedBlock lblocks[] = new LocatedBlock[2];
 
     // Find block in data stream.
-    DFSClient.DFSDataInputStream dfsIn = (DFSClient.DFSDataInputStream) in;
+    HdfsDataInputStream dfsIn = (HdfsDataInputStream) in;
     ExtendedBlock dataBlock = dfsIn.getCurrentBlock();
     if (dataBlock == null) {
       LOG.error("Error: Current block in data stream is null! ");
@@ -650,7 +661,7 @@ public class DistributedFileSystem extends FileSystem {
         + dataNode[0]);
 
     // Find block in checksum stream
-    DFSClient.DFSDataInputStream dfsSums = (DFSClient.DFSDataInputStream) sums;
+    HdfsDataInputStream dfsSums = (HdfsDataInputStream) sums;
     ExtendedBlock sumsBlock = dfsSums.getCurrentBlock();
     if (sumsBlock == null) {
       LOG.error("Error: Current block in checksum stream is null! ");

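Editor's note: with getScheme() implemented here (and the service registration added further down), a client resolves the HDFS FileSystem implementation purely from the URI scheme. A hedged usage sketch; the namenode address and path are placeholders:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SchemeLookupExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The "hdfs" scheme selects DistributedFileSystem.
        FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
        System.out.println("scheme = " + fs.getScheme());   // expected: hdfs
        fs.mkdirs(new Path("/tmp/scheme-lookup-example"));
      }
    }
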
+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java

@@ -154,6 +154,17 @@ public class HftpFileSystem extends FileSystem
     return SecurityUtil.buildTokenService(nnSecureUri).toString();
   }
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>hftp</code>
+   */
+  @Override
+  public String getScheme() {
+    return "hftp";
+  }
+
   @Override
   public void initialize(final URI name, final Configuration conf)
   throws IOException {

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java

@@ -58,6 +58,17 @@ public class HsftpFileSystem extends HftpFileSystem {
   private static final long MM_SECONDS_PER_DAY = 1000 * 60 * 60 * 24;
   private volatile int ExpWarnDays = 0;
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>hsftp</code>
+   */
+  @Override
+  public String getScheme() {
+    return "hsftp";
+  }
+
   @Override
   public void initialize(URI name, Configuration conf) throws IOException {
     super.initialize(name, conf);

+ 59 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSOutputStream;
+
+/**
+ * The Hdfs implementation of {@link FSDataOutputStream}.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HdfsDataOutputStream extends FSDataOutputStream {
+  public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats,
+      long startPosition) throws IOException {
+    super(out, stats, startPosition);
+  }
+
+  public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats
+      ) throws IOException {
+    this(out, stats, 0L);
+  }
+
+  /**
+   * Get the actual number of replicas of the current block.
+   * 
+   * This can be different from the designated replication factor of the file
+   * because the namenode does not maintain replication for the blocks which are
+   * currently being written to. Depending on the configuration, the client may
+   * continue to write to a block even if a few datanodes in the write pipeline
+   * have failed, or the client may add a new datanodes once a datanode has
+   * failed.
+   * 
+   * @return the number of valid replicas of the current block
+   */
+  public synchronized int getCurrentBlockReplication() throws IOException {
+    return ((DFSOutputStream)getWrappedStream()).getCurrentBlockReplication();
+  }
+}

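Editor's note: HdfsDataOutputStream makes the replica query part of the public client API, so callers no longer have to reach for the wrapped DFSOutputStream. A hedged usage sketch; the path is illustrative, and the cast only succeeds when the stream was opened on a DistributedFileSystem:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

    public class ReplicationCheckExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();      // assumes fs.defaultFS points at HDFS
        FileSystem fs = FileSystem.get(conf);
        FSDataOutputStream out = fs.create(new Path("/tmp/repl-check.dat"));
        out.write(new byte[1024]);
        out.hflush();                                  // force a block to be allocated
        int replicas = ((HdfsDataOutputStream) out).getCurrentBlockReplication();
        System.out.println("current block replication = " + replicas);
        out.close();
      }
    }
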
+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -651,7 +651,6 @@ public class DatanodeManager {
    * checks if any of the hosts have changed states:
    */
   public void refreshNodes(final Configuration conf) throws IOException {
-    namesystem.checkSuperuserPrivilege();
     refreshHostsReader(conf);
     namesystem.writeLock();
     try {

+ 5 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java

@@ -50,7 +50,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -398,22 +398,21 @@ public class DatanodeWebHdfsMethods {
     {
       final int b = bufferSize.getValue(conf);
       final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
-      DFSDataInputStream in = null;
+      HdfsDataInputStream in = null;
       try {
-        in = new DFSClient.DFSDataInputStream(
-            dfsclient.open(fullpath, b, true));
+        in = new HdfsDataInputStream(dfsclient.open(fullpath, b, true));
         in.seek(offset.getValue());
       } catch(IOException ioe) {
         IOUtils.cleanup(LOG, in);
         IOUtils.cleanup(LOG, dfsclient);
         throw ioe;
       }
-      final DFSDataInputStream dis = in;
+      final HdfsDataInputStream dis = in;
       final StreamingOutput streaming = new StreamingOutput() {
         @Override
         public void write(final OutputStream out) throws IOException {
           final Long n = length.getValue();
-          DFSDataInputStream dfsin = dis;
+          HdfsDataInputStream dfsin = dis;
           DFSClient client = dfsclient;
           try {
             if (n == null) {

+ 21 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -457,7 +457,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
       this.accessTimePrecision = conf.getLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
       this.supportAppends = conf.getBoolean(DFS_SUPPORT_APPEND_KEY, DFS_SUPPORT_APPEND_DEFAULT);
-      LOG.info("Append Enabled: " + haEnabled);
+      LOG.info("Append Enabled: " + supportAppends);
 
       this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
       
@@ -3332,8 +3332,26 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
     
   void finalizeUpgrade() throws IOException {
+    writeLock();
+    try {
+      checkOperation(OperationCategory.WRITE);
+      checkSuperuserPrivilege();
+      getFSImage().finalizeUpgrade();
+    } finally {
+      writeUnlock();
+    }
+  }
+
+  void refreshNodes() throws IOException {
+    checkOperation(OperationCategory.UNCHECKED);
+    checkSuperuserPrivilege();
+    getBlockManager().getDatanodeManager().refreshNodes(new HdfsConfiguration());
+  }
+
+  void setBalancerBandwidth(long bandwidth) throws IOException {
+    checkOperation(OperationCategory.UNCHECKED);
     checkSuperuserPrivilege();
-    getFSImage().finalizeUpgrade();
+    getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
   }
 
   /**
@@ -3723,7 +3741,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           msg += String.format(
             "The number of live datanodes %d needs an additional %d live "
             + "datanodes to reach the minimum number %d.",
-            numLive, (datanodeThreshold - numLive) + 1 , datanodeThreshold);
+            numLive, (datanodeThreshold - numLive), datanodeThreshold);
         }
         msg += " " + leaveMsg;
       } else {

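Editor's note: these hunks move the operation-category and superuser checks out of the RPC layer (see the NameNodeRpcServer hunks below) and into single FSNamesystem entry points, so every path into refreshNodes, finalizeUpgrade, and setBalancerBandwidth runs the same guards. A compressed sketch of that consolidation pattern, not the actual classes:

    // The RPC facade only delegates; the service method owns its own checks.
    class AdminService {
      void refreshNodes() {
        checkSuperuserPrivilege();      // single, shared guard
        doRefresh();
      }
      private void checkSuperuserPrivilege() { /* verify caller identity */ }
      private void doRefresh() { /* re-read include/exclude host lists */ }
    }

    class RpcFacade {
      private final AdminService service = new AdminService();
      void refreshNodes() { service.refreshNodes(); }   // no duplicated checks here
    }
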
+ 2 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -707,9 +707,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
 
   @Override // ClientProtocol
   public void refreshNodes() throws IOException {
-    namesystem.checkOperation(OperationCategory.UNCHECKED);
-    namesystem.getBlockManager().getDatanodeManager().refreshNodes(
-        new HdfsConfiguration());
+    namesystem.refreshNodes();
   }
 
   @Override // NamenodeProtocol
@@ -732,7 +730,6 @@ class NameNodeRpcServer implements NamenodeProtocols {
     
   @Override // ClientProtocol
   public void finalizeUpgrade() throws IOException {
-    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.finalizeUpgrade();
   }
 
@@ -772,8 +769,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
    */
   @Override // ClientProtocol
   public void setBalancerBandwidth(long bandwidth) throws IOException {
-    namesystem.checkOperation(OperationCategory.UNCHECKED);
-    namesystem.getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
+    namesystem.setBalancerBandwidth(bandwidth);
   }
   
   @Override // ClientProtocol

+ 25 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java

@@ -45,4 +45,29 @@ public class JournalInfo {
   public int getNamespaceId() {
     return namespaceId;
   }
+  
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("lv=").append(layoutVersion).append(";cid=").append(clusterId)
+    .append(";nsid=").append(namespaceId);
+    return sb.toString();
+  }
+  
+  @Override
+  public boolean equals(Object o) {
+    JournalInfo jInfo;
+    if (!(o instanceof JournalInfo)) {
+      return false;
+    }
+    jInfo = (JournalInfo) o;
+    return ((jInfo.clusterId.equals(this.clusterId))
+        && (jInfo.namespaceId == this.namespaceId)
+        && (jInfo.layoutVersion == this.layoutVersion));
+  }
+  
+  @Override
+  public int hashCode() {
+    return (namespaceId ^ layoutVersion ^ clusterId.hashCode());
+  }
 }

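Editor's note: the new equals() and hashCode() use the same three fields (clusterId, namespaceId, layoutVersion), which preserves the contract that equal objects hash equally. A hedged, generic consistency check that could back a unit test; constructing two equal JournalInfo instances is left out because the constructor signature is not shown in this hunk:

    final class EqualsHashCodeCheck {
      static <T> void checkContract(T a, T b) {
        if (a.equals(b) && a.hashCode() != b.hashCode()) {
          throw new AssertionError("equal objects must have equal hash codes");
        }
        if (a.equals(b) != b.equals(a)) {
          throw new AssertionError("equals must be symmetric");
        }
      }
    }
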
+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -155,6 +155,17 @@ public class WebHdfsFileSystem extends FileSystem
     }
   }
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>webhdfs</code>
+   */
+  @Override
+  public String getScheme() {
+    return "webhdfs";
+  }
+
   @Override
   public synchronized void initialize(URI uri, Configuration conf
       ) throws IOException {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4

@@ -71,7 +71,7 @@ AC_DEFUN([AP_SUPPORTED_HOST],[
   esac
 
   case $host_cpu in
-  powerpc)
+  powerpc*)
     CFLAGS="$CFLAGS -DCPU=\\\"$host_cpu\\\""
     HOST_CPU=$host_cpu;;
   sparc*)

+ 19 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem

@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.hadoop.hdfs.DistributedFileSystem
+org.apache.hadoop.hdfs.HftpFileSystem
+org.apache.hadoop.hdfs.HsftpFileSystem
+org.apache.hadoop.hdfs.web.WebHdfsFileSystem

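Editor's note: this service file is what lets FileSystem implementations be discovered from the classpath through the standard java.util.ServiceLoader mechanism, keyed by each implementation's getScheme(). A hedged illustration of the discovery step:

    import java.util.ServiceLoader;
    import org.apache.hadoop.fs.FileSystem;

    public class ListRegisteredFileSystems {
      public static void main(String[] args) {
        // Instantiates every FileSystem listed in META-INF/services files on
        // the classpath and prints the scheme each one claims.
        for (FileSystem fs : ServiceLoader.load(FileSystem.class)) {
          System.out.println(fs.getClass().getName() + " -> " + fs.getScheme());
        }
      }
    }
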
+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -55,8 +55,8 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -515,15 +515,14 @@ public class DFSTestUtil {
   }
   
   public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
-    DFSDataInputStream in = 
-      (DFSDataInputStream) ((DistributedFileSystem)fs).open(path);
+    HdfsDataInputStream in = (HdfsDataInputStream)((DistributedFileSystem)fs).open(path);
     in.readByte();
     return in.getCurrentBlock();
   }  
 
   public static List<LocatedBlock> getAllBlocks(FSDataInputStream in)
       throws IOException {
-    return ((DFSClient.DFSDataInputStream) in).getAllBlocks();
+    return ((HdfsDataInputStream) in).getAllBlocks();
   }
 
   public static Token<BlockTokenIdentifier> getBlockToken(

+ 4 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -87,7 +87,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -103,7 +102,6 @@ import org.apache.hadoop.util.ToolRunner;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import com.google.common.io.Files;
 
 /**
  * This class creates a single-process DFS cluster for junit testing.
@@ -1588,7 +1586,7 @@ public class MiniDFSCluster {
   /**
    * Get a client handle to the DFS cluster with a single namenode.
    */
-  public FileSystem getFileSystem() throws IOException {
+  public DistributedFileSystem getFileSystem() throws IOException {
     checkSingleNameNode();
     return getFileSystem(0);
   }
@@ -1596,8 +1594,9 @@ public class MiniDFSCluster {
   /**
    * Get a client handle to the DFS cluster for the namenode at given index.
    */
-  public FileSystem getFileSystem(int nnIndex) throws IOException {
-    return FileSystem.get(getURI(nnIndex), nameNodes[nnIndex].conf);
+  public DistributedFileSystem getFileSystem(int nnIndex) throws IOException {
+    return (DistributedFileSystem)FileSystem.get(getURI(nnIndex),
+        nameNodes[nnIndex].conf);
   }
 
   /**

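Editor's note: returning DistributedFileSystem from getFileSystem() removes a cast that HDFS tests repeated everywhere. A hedged JUnit 4 sketch of a test relying on the narrowed type; the path is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.Test;

    public class TestTypedFileSystemAccess {
      @Test
      public void testMkdirsOnMiniCluster() throws Exception {
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
        try {
          cluster.waitActive();
          DistributedFileSystem dfs = cluster.getFileSystem();  // no cast needed
          dfs.mkdirs(new Path("/typed-access"));
        } finally {
          cluster.shutdown();
        }
      }
    }
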
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java

@@ -48,7 +48,7 @@ public class TestBlocksScheduledCounter extends TestCase {
       out.write(i);
     }
     // flush to make sure a block is allocated.
-    ((DFSOutputStream)(out.getWrappedStream())).hflush();
+    out.hflush();
     
     ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
     final DatanodeManager dm = cluster.getNamesystem().getBlockManager(

+ 78 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java

@@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -31,6 +33,7 @@ import java.io.InputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
 
+import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 import org.junit.Test;
 
 public class TestByteRangeInputStream {
@@ -84,6 +87,11 @@ public static class MockHttpURLConnection extends HttpURLConnection {
   public void setResponseCode(int resCode) {
     responseCode = resCode;
   }
+  
+  @Override
+  public String getHeaderField(String field) {
+    return (field.equalsIgnoreCase(StreamFile.CONTENT_LENGTH)) ? "65535" : null;
+  }
 }
   
   @Test
@@ -163,4 +171,74 @@ public static class MockHttpURLConnection extends HttpURLConnection {
                    "HTTP_OK expected, received 206", e.getMessage());
                    "HTTP_OK expected, received 206", e.getMessage());
     }
     }
   }
   }
+  
+  @Test
+  public void testPropagatedClose() throws IOException {
+    ByteRangeInputStream brs = spy(
+        new HftpFileSystem.RangeHeaderInputStream(new URL("http://test/")));
+    
+    InputStream mockStream = mock(InputStream.class);
+    doReturn(mockStream).when(brs).openInputStream();
+
+    int brisOpens = 0;
+    int brisCloses = 0;
+    int isCloses = 0;
+    
+    // first open, shouldn't close underlying stream
+    brs.getInputStream();
+    verify(brs, times(++brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+    
+    // stream is open, shouldn't close underlying stream
+    brs.getInputStream();
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+    
+    // seek forces a reopen, should close underlying stream
+    brs.seek(1);
+    brs.getInputStream();
+    verify(brs, times(++brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(++isCloses)).close();
+
+    // verify that the underlying stream isn't closed after a seek
+    // ie. the state was correctly updated
+    brs.getInputStream();
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+
+    // seeking to same location should be a no-op
+    brs.seek(1);
+    brs.getInputStream();
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+
+    // close should of course close
+    brs.close();
+    verify(brs, times(++brisCloses)).close();
+    verify(mockStream, times(++isCloses)).close();
+    
+    // it's already closed, underlying stream should not close
+    brs.close();
+    verify(brs, times(++brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+    
+    // it's closed, don't reopen it
+    boolean errored = false;
+    try {
+      brs.getInputStream();
+    } catch (IOException e) {
+      errored = true;
+      assertEquals("Stream closed", e.getMessage());
+    } finally {
+      assertTrue("Read a closed steam", errored);
+    }
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+  }
 }

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
@@ -143,10 +144,10 @@ public class TestDecommission {
     String downnode, int numDatanodes) throws IOException {
     boolean isNodeDown = (downnode != null);
     // need a raw stream
-    assertTrue("Not HDFS:"+fileSys.getUri(), 
-    fileSys instanceof DistributedFileSystem);
-    DFSClient.DFSDataInputStream dis = (DFSClient.DFSDataInputStream)
-      ((DistributedFileSystem)fileSys).open(name);
+    assertTrue("Not HDFS:"+fileSys.getUri(),
+        fileSys instanceof DistributedFileSystem);
+    HdfsDataInputStream dis = (HdfsDataInputStream)
+        ((DistributedFileSystem)fileSys).open(name);
     Collection<LocatedBlock> dinfo = dis.getAllBlocks();
     for (LocatedBlock blk : dinfo) { // for each block
       int hasdown = 0;

+ 15 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -31,6 +31,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHEC
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.junit.Assume.assumeTrue;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -53,6 +54,7 @@ import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -68,8 +70,6 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
 
-import static org.junit.Assume.assumeTrue;
-
 /**
  * This class tests various cases during file creation.
  */
@@ -99,6 +99,11 @@ public class TestFileCreation extends junit.framework.TestCase {
     return stm;
   }
 
+  public static HdfsDataOutputStream create(DistributedFileSystem dfs,
+      Path name, int repl) throws IOException {
+    return (HdfsDataOutputStream)createFile(dfs, name, repl);
+  }
+
   //
   // writes to file but does not close it
   //
@@ -494,7 +499,7 @@ public class TestFileCreation extends junit.framework.TestCase {
 
     // create cluster
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-    FileSystem fs = null;
+    DistributedFileSystem fs = null;
     try {
       cluster.waitActive();
       fs = cluster.getFileSystem();
@@ -502,21 +507,17 @@ public class TestFileCreation extends junit.framework.TestCase {
 
       // create a new file.
       Path file1 = new Path("/filestatus.dat");
-      FSDataOutputStream stm = createFile(fs, file1, 1);
+      HdfsDataOutputStream stm = create(fs, file1, 1);
       System.out.println("testFileCreationNamenodeRestart: "
                          + "Created file " + file1);
-      int actualRepl = ((DFSOutputStream)(stm.getWrappedStream())).
-                        getNumCurrentReplicas();
-      assertTrue(file1 + " should be replicated to 1 datanodes.",
-                 actualRepl == 1);
+      assertEquals(file1 + " should be replicated to 1 datanode.", 1,
+          stm.getCurrentBlockReplication());
 
       // write two full blocks.
       writeFile(stm, numBlocks * blockSize);
       stm.hflush();
-      actualRepl = ((DFSOutputStream)(stm.getWrappedStream())).
-                        getNumCurrentReplicas();
-      assertTrue(file1 + " should still be replicated to 1 datanodes.",
-                 actualRepl == 1);
+      assertEquals(file1 + " should still be replicated to 1 datanode.", 1,
+          stm.getCurrentBlockReplication());
 
       // rename file wile keeping it open.
       Path fileRenamed = new Path("/filestatusRenamed.dat");
@@ -849,11 +850,10 @@ public class TestFileCreation extends junit.framework.TestCase {
       // create a new file.
       final String f = DIR + "foo";
       final Path fpath = new Path(f);
-      FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
+      HdfsDataOutputStream out = create(dfs, fpath, DATANODE_NUM);
       out.write("something".getBytes());
       out.hflush();
-      int actualRepl = ((DFSOutputStream)(out.getWrappedStream())).
-                        getNumCurrentReplicas();
+      int actualRepl = out.getCurrentBlockReplication();
       assertTrue(f + " should be replicated to " + DATANODE_NUM + " datanodes.",
                  actualRepl == DATANODE_NUM);
 

+ 94 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java

@@ -0,0 +1,94 @@
+/**                                                                                                                               
+ * Licensed to the Apache Software Foundation (ASF) under one                                                                     
+ * or more contributor license agreements.  See the NOTICE file                                                                   
+ * distributed with this work for additional information                                                                          
+ * regarding copyright ownership.  The ASF licenses this file                                                                     
+ * to you under the Apache License, Version 2.0 (the                                                                              
+ * "License"); you may not use this file except in compliance                                                                     
+ * with the License.  You may obtain a copy of the License at                                                                     
+ *                                                                                                                                
+ *     http://www.apache.org/licenses/LICENSE-2.0                                                                                 
+ *                                                                                                                                
+ * Unless required by applicable law or agreed to in writing, software                                                            
+ * distributed under the License is distributed on an "AS IS" BASIS,                                                              
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.                                                       
+ * See the License for the specific language governing permissions and                                                            
+ * limitations under the License.                                                                                                 
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.junit.Assert;
+import org.junit.Test;
+
+/** Test the fileLength on cluster restarts */
+public class TestFileLengthOnClusterRestart {
+  /**
+   * Tests the fileLength when we sync the file and restart the cluster and
+   * Datanodes not report to Namenode yet.
+   */
+  @Test(timeout = 60000)
+  public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister()
+      throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    // create cluster
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
+
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(2).build();
+    HdfsDataInputStream in = null;
+    try {
+      Path path = new Path(MiniDFSCluster.getBaseDirectory(), "test");
+      DistributedFileSystem dfs = (DistributedFileSystem) cluster
+          .getFileSystem();
+      FSDataOutputStream out = dfs.create(path);
+      int fileLength = 1030;
+      out.write(new byte[fileLength]);
+      out.hsync();
+      cluster.restartNameNode();
+      cluster.waitActive();
+      in = (HdfsDataInputStream) dfs.open(path, 1024);
+      // Verify the length when we just restart NN. DNs will register
+      // immediately.
+      Assert.assertEquals(fileLength, in.getVisibleLength());
+      cluster.shutdownDataNodes();
+      cluster.restartNameNode(false);
+      // This is just for ensuring NN started.
+      verifyNNIsInSafeMode(dfs);
+
+      try {
+        in = (HdfsDataInputStream) dfs.open(path);
+        Assert.fail("Expected IOException");
+      } catch (IOException e) {
+        Assert.assertEquals("Could not obtain the last block locations.", e
+            .getLocalizedMessage());
+      }
+    } finally {
+      if (null != in) {
+        in.close();
+      }
+      cluster.shutdown();
+
+    }
+  }
+
+  private void verifyNNIsInSafeMode(DistributedFileSystem dfs)
+      throws IOException {
+    while (true) {
+      try {
+        if (dfs.isInSafeMode()) {
+          return;
+        } else {
+          throw new IOException("Expected to be in SafeMode");
+        }
+      } catch (IOException e) {
+        // NN might not have started completely yet; ignore and retry.
+      }
+    }
+  }
+}

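The new test exercises HdfsDataInputStream.getVisibleLength() after an hsync(); outside the MiniDFSCluster harness the same probe might look like the sketch below. The path and byte count are placeholders, and a reachable HDFS client configuration is assumed.

// Sketch: observe how many bytes of an in-progress file readers can see.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class VisibleLengthSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/visible-length-demo");        // illustrative path

    FSDataOutputStream out = fs.create(p);
    out.write(new byte[1030]);
    out.hsync();                      // persist to the DataNodes without closing

    // getVisibleLength() reports the bytes a reader is guaranteed to see,
    // even though the NameNode may still record a shorter file length.
    HdfsDataInputStream in = (HdfsDataInputStream) fs.open(p);
    System.out.println("visible length = " + in.getVisibleLength());
    in.close();
    out.close();
  }
}
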
+ 40 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
 import java.io.IOException;
 import java.io.IOException;
+import java.io.InputStream;
 import java.net.URISyntaxException;
 import java.net.URISyntaxException;
 import java.net.URI;
 import java.net.URI;
 import java.net.URL;
 import java.net.URL;
@@ -234,6 +235,45 @@ public class TestHftpFileSystem {
     assertEquals('7', in.read());
     assertEquals('7', in.read());
   }
   }
 
 
+  @Test
+  public void testReadClosedStream() throws IOException {
+    final Path testFile = new Path("/testfile+2");
+    FSDataOutputStream os = hdfs.create(testFile, true);
+    os.writeBytes("0123456789");
+    os.close();
+
+    // ByteRangeInputStream delays opens until reads.  Make sure it doesn't
+    // open a closed stream that has never been opened
+    FSDataInputStream in = hftpFs.open(testFile);
+    in.close();
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+    
+    // force the stream to connect and then close it
+    in = hftpFs.open(testFile);
+    int ch = in.read(); 
+    assertEquals('0', ch);
+    in.close();
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+    
+    // make sure seeking doesn't automagically reopen the stream
+    in.seek(4);
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+  }
+  
+  private void checkClosedStream(InputStream is) {
+    IOException ioe = null;
+    try {
+      is.read();
+    } catch (IOException e) {
+      ioe = e;
+    }
+    assertNotNull("No exception on closed read", ioe);
+    assertEquals("Stream closed", ioe.getMessage());
+  }
+  
   public void resetFileSystem() throws IOException {
   public void resetFileSystem() throws IOException {
     // filesystem caching has a quirk/bug that it caches based on the user's
     // filesystem caching has a quirk/bug that it caches based on the user's
     // given uri.  the result is if a filesystem is instantiated with no port,
     // given uri.  the result is if a filesystem is instantiated with no port,

+ 37 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -49,6 +50,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 import org.apache.log4j.Level;
 import org.junit.AfterClass;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.Test;
 
 
@@ -90,7 +92,7 @@ public class TestLeaseRecovery2 {
 
 
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
     cluster.waitActive();
     cluster.waitActive();
-    dfs = (DistributedFileSystem)cluster.getFileSystem();
+    dfs = cluster.getFileSystem();
   }
   }
   
   
   /**
   /**
@@ -406,17 +408,26 @@ public class TestLeaseRecovery2 {
    */
    */
   @Test
   @Test
   public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
   public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
-    hardLeaseRecoveryRestartHelper(false);
+    hardLeaseRecoveryRestartHelper(false, -1);
   }
   }
-  
+
+  @Test
+  public void testHardLeaseRecoveryAfterNameNodeRestart2() throws Exception {
+    hardLeaseRecoveryRestartHelper(false, 1535);
+  }
+
   @Test
   @Test
   public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart()
   public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart()
       throws Exception {
       throws Exception {
-    hardLeaseRecoveryRestartHelper(true);
+    hardLeaseRecoveryRestartHelper(true, -1);
   }
   }
   
   
-  public void hardLeaseRecoveryRestartHelper(boolean doRename)
+  public void hardLeaseRecoveryRestartHelper(boolean doRename, int size)
       throws Exception {
       throws Exception {
+    if (size < 0) {
+      size =  AppendTestUtil.nextInt(FILE_SIZE + 1);
+    }
+
     //create a file
     //create a file
     String fileStr = "/hardLeaseRecovery";
     String fileStr = "/hardLeaseRecovery";
     AppendTestUtil.LOG.info("filestr=" + fileStr);
     AppendTestUtil.LOG.info("filestr=" + fileStr);
@@ -426,7 +437,6 @@ public class TestLeaseRecovery2 {
     assertTrue(dfs.dfs.exists(fileStr));
     assertTrue(dfs.dfs.exists(fileStr));
 
 
     // write bytes into the file.
     // write bytes into the file.
-    int size = AppendTestUtil.nextInt(FILE_SIZE);
     AppendTestUtil.LOG.info("size=" + size);
     AppendTestUtil.LOG.info("size=" + size);
     stm.write(buffer, 0, size);
     stm.write(buffer, 0, size);
     
     
@@ -440,6 +450,11 @@ public class TestLeaseRecovery2 {
     AppendTestUtil.LOG.info("hflush");
     AppendTestUtil.LOG.info("hflush");
     stm.hflush();
     stm.hflush();
     
     
+    // check visible length
+    final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
+    Assert.assertEquals(size, in.getVisibleLength());
+    in.close();
+    
     if (doRename) {
     if (doRename) {
       fileStr += ".renamed";
       fileStr += ".renamed";
       Path renamedPath = new Path(fileStr);
       Path renamedPath = new Path(fileStr);
@@ -463,14 +478,11 @@ public class TestLeaseRecovery2 {
     // Make sure lease recovery begins.
     // Make sure lease recovery begins.
     Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2);
     Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2);
     
     
-    assertEquals("lease holder should now be the NN", HdfsServerConstants.NAMENODE_LEASE_HOLDER,
-        NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
+    checkLease(fileStr, size);
     
     
     cluster.restartNameNode(false);
     cluster.restartNameNode(false);
     
     
-    assertEquals("lease holder should still be the NN after restart",
-        HdfsServerConstants.NAMENODE_LEASE_HOLDER,
-        NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
+    checkLease(fileStr, size);
     
     
     // Let the DNs send heartbeats again.
     // Let the DNs send heartbeats again.
     for (DataNode dn : cluster.getDataNodes()) {
     for (DataNode dn : cluster.getDataNodes()) {
@@ -492,12 +504,12 @@ public class TestLeaseRecovery2 {
     assertEquals(size, locatedBlocks.getFileLength());
     assertEquals(size, locatedBlocks.getFileLength());
 
 
     // make sure that the client can't write data anymore.
     // make sure that the client can't write data anymore.
-    stm.write('b');
     try {
     try {
+      stm.write('b');
       stm.hflush();
       stm.hflush();
       fail("Should not be able to flush after we've lost the lease");
       fail("Should not be able to flush after we've lost the lease");
     } catch (IOException e) {
     } catch (IOException e) {
-      LOG.info("Expceted exception on hflush", e);
+      LOG.info("Expected exception on write/hflush", e);
     }
     }
     
     
     try {
     try {
@@ -512,4 +524,16 @@ public class TestLeaseRecovery2 {
         "File size is good. Now validating sizes from datanodes...");
         "File size is good. Now validating sizes from datanodes...");
     AppendTestUtil.checkFullFile(dfs, filePath, size, buffer, fileStr);
     AppendTestUtil.checkFullFile(dfs, filePath, size, buffer, fileStr);
   }
   }
+  
+  static void checkLease(String f, int size) {
+    final String holder = NameNodeAdapter.getLeaseHolderForPath(
+        cluster.getNameNode(), f); 
+    if (size == 0) {
+      assertEquals("lease holder should be null since the file is closed", null, holder);
+    } else {
+      assertEquals("lease holder should now be the NN",
+          HdfsServerConstants.NAMENODE_LEASE_HOLDER, holder);
+    }
+    
+  }
 }
 }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RemoteException;
@@ -147,7 +147,7 @@ public class TestReadWhileWriting {
     
     
     final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
     final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
     
     
-    final DFSDataInputStream in = (DFSDataInputStream)fs.open(p);
+    final HdfsDataInputStream in = (HdfsDataInputStream)fs.open(p);
 
 
     //Check visible length
     //Check visible length
     Assert.assertTrue(in.getVisibleLength() >= expectedsize);
     Assert.assertTrue(in.getVisibleLength() >= expectedsize);

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
@@ -187,7 +188,7 @@ public class TestReplaceDatanodeOnFailure {
 
 
   static class SlowWriter extends Thread {
   static class SlowWriter extends Thread {
     final Path filepath;
     final Path filepath;
-    private FSDataOutputStream out = null;
+    final HdfsDataOutputStream out;
     final long sleepms;
     final long sleepms;
     private volatile boolean running = true;
     private volatile boolean running = true;
     
     
@@ -195,7 +196,7 @@ public class TestReplaceDatanodeOnFailure {
         ) throws IOException {
         ) throws IOException {
       super(SlowWriter.class.getSimpleName() + ":" + filepath);
       super(SlowWriter.class.getSimpleName() + ":" + filepath);
       this.filepath = filepath;
       this.filepath = filepath;
-      this.out = fs.create(filepath, REPLICATION);
+      this.out = (HdfsDataOutputStream)fs.create(filepath, REPLICATION);
       this.sleepms = sleepms;
       this.sleepms = sleepms;
     }
     }
 
 
@@ -231,8 +232,7 @@ public class TestReplaceDatanodeOnFailure {
     }
     }
 
 
     void checkReplication() throws IOException {
     void checkReplication() throws IOException {
-      final DFSOutputStream dfsout = (DFSOutputStream)out.getWrappedStream();
-      Assert.assertEquals(REPLICATION, dfsout.getNumCurrentReplicas());
+      Assert.assertEquals(REPLICATION, out.getCurrentBlockReplication());
     }        
     }        
   }
   }
 
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java

@@ -342,7 +342,7 @@ public class TestSafeMode {
     String tipMsg = cluster.getNamesystem().getSafemode();
     String tipMsg = cluster.getNamesystem().getSafemode();
     assertTrue("Safemode tip message looks right: " + tipMsg,
     assertTrue("Safemode tip message looks right: " + tipMsg,
                tipMsg.contains("The number of live datanodes 0 needs an additional " +
                tipMsg.contains("The number of live datanodes 0 needs an additional " +
-                               "2 live datanodes to reach the minimum number 1. " +
+                               "1 live datanodes to reach the minimum number 1. " +
                                "Safe mode will be turned off automatically."));
                                "Safe mode will be turned off automatically."));
 
 
     // Start a datanode
     // Start a datanode

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java

@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -119,7 +119,7 @@ public class TestShortCircuitLocalRead {
    */
    */
   static void checkFileContentDirect(FileSystem fs, Path name, byte[] expected,
   static void checkFileContentDirect(FileSystem fs, Path name, byte[] expected,
       int readOffset) throws IOException {
       int readOffset) throws IOException {
-    DFSDataInputStream stm = (DFSDataInputStream)fs.open(name);
+    HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);
 
 
     ByteBuffer actual = ByteBuffer.allocate(expected.length - readOffset);
     ByteBuffer actual = ByteBuffer.allocate(expected.length - readOffset);
 
 

+ 2 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.junit.After;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Before;
@@ -155,7 +156,7 @@ public class TestWriteRead {
     try {
     try {
       in = openInputStream(path);
       in = openInputStream(path);
 
 
-      long visibleLenFromReadStream = getVisibleFileLength(in);
+      long visibleLenFromReadStream = ((HdfsDataInputStream)in).getVisibleLength();
 
 
       if (visibleLenFromReadStream < byteExpected)
       if (visibleLenFromReadStream < byteExpected)
       {
       {
@@ -418,11 +419,6 @@ public class TestWriteRead {
     return fileStatus.getLen();
     return fileStatus.getLen();
   }
   }
 
 
-  private long getVisibleFileLength(FSDataInputStream in) throws IOException {
-    DFSClient.DFSDataInputStream din = (DFSClient.DFSDataInputStream) in;
-    return din.getVisibleLength();
-  }
-
   private boolean ifExists(Path path) throws IOException {
   private boolean ifExists(Path path) throws IOException {
     return useFCOption ? mfc.util().exists(path) : mfs.exists(path);
     return useFCOption ? mfc.util().exists(path) : mfs.exists(path);
   }
   }

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java

@@ -126,7 +126,8 @@ public class NameNodeAdapter {
   }
   }
 
 
   public static String getLeaseHolderForPath(NameNode namenode, String path) {
   public static String getLeaseHolderForPath(NameNode namenode, String path) {
-    return namenode.getNamesystem().leaseManager.getLeaseByPath(path).getHolder();
+    Lease l = namenode.getNamesystem().leaseManager.getLeaseByPath(path);
+    return l == null? null: l.getHolder();
   }
   }
 
 
   /**
   /**

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java

@@ -123,6 +123,17 @@ public class TestNetworkTopology extends TestCase {
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[3]);
     assertTrue(testNodes[1] == dataNodes[3]);
     assertTrue(testNodes[2] == dataNodes[5]);
     assertTrue(testNodes[2] == dataNodes[5]);
+    
+    // array contains local rack node which happens to be in position 0
+    testNodes[0] = dataNodes[1];
+    testNodes[1] = dataNodes[5];
+    testNodes[2] = dataNodes[3];
+    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    // pseudoSortByDistance does not take the "data center" layer into consideration
+    // and it doesn't sort by getDistance, so 1, 5, 3 is also valid here
+    assertTrue(testNodes[0] == dataNodes[1]);
+    assertTrue(testNodes[1] == dataNodes[5]);
+    assertTrue(testNodes[2] == dataNodes[3]);
   }
   }
   
   
   public void testRemove() throws Exception {
   public void testRemove() throws Exception {

+ 27 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -154,6 +154,12 @@ Release 2.0.0 - UNRELEASED
     MAPREDUCE-4093. Improve RM WebApp start up when proxy address is not set
     MAPREDUCE-4093. Improve RM WebApp start up when proxy address is not set
     (Devaraj K vai bobby)
     (Devaraj K vai bobby)
 
 
+    MAPREDUCE-4138. Reduce memory usage of counters due to non-static nested
+    classes. (tomwhite)
+
+    MAPREDUCE-3883. Document yarn.nodemanager.delete.debug-delay-sec 
+    configuration property (Eugene Koontz via tgraves)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
   BUG FIXES
   BUG FIXES
@@ -260,6 +266,9 @@ Release 2.0.0 - UNRELEASED
     MAPREDUCE-4141. clover integration broken, also mapreduce poms are 
     MAPREDUCE-4141. clover integration broken, also mapreduce poms are 
     pulling in clover as a dependency. (phunt via tucu)
     pulling in clover as a dependency. (phunt via tucu)
 
 
+    MAPREDUCE-4193. broken doc link for yarn-default.xml in site.xml.
+    (phunt via tomwhite)
+
 Release 0.23.3 - UNRELEASED
 Release 0.23.3 - UNRELEASED
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
@@ -403,6 +412,24 @@ Release 0.23.3 - UNRELEASED
 
 
     MAPREDUCE-4133. MR over viewfs is broken (John George via bobby)
     MAPREDUCE-4133. MR over viewfs is broken (John George via bobby)
 
 
+    MAPREDUCE-4194. ConcurrentModificationError in DirectoryCollection
+    (Jonathan Eagles via bobby)
+
+    MAPREDUCE-3613. web service calls header contains 2 content types
+    (tgraves)
+
+    MAPREDUCE-4169. Container Logs appear in unsorted order (Jonathan Eagles
+    via bobby)
+
+    MAPREDUCE-4189. TestContainerManagerSecurity is failing (Devaraj K via
+    bobby)
+
+    MAPREDUCE-4209. junit dependency in hadoop-mapreduce-client is missing
+    scope test (Radim Kolar via bobby)
+
+    MAPREDUCE-4206. Sorting by Last Health-Update on the RM nodes page
+    does not work correctly (Jonathan Eagles via tgraves)
+
 Release 0.23.2 - UNRELEASED
 Release 0.23.2 - UNRELEASED
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES

+ 21 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.mapreduce.v2.app.webapp;
 import java.io.IOException;
 import java.io.IOException;
 
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.GET;
 import javax.ws.rs.GET;
 import javax.ws.rs.Path;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
 import javax.ws.rs.PathParam;
@@ -67,6 +68,8 @@ import com.google.inject.Inject;
 public class AMWebServices {
 public class AMWebServices {
   private final AppContext appCtx;
   private final AppContext appCtx;
   private final App app;
   private final App app;
+
+  private @Context HttpServletResponse response;
   
   
   @Inject
   @Inject
   public AMWebServices(final App app, final AppContext context) {
   public AMWebServices(final App app, final AppContext context) {
@@ -86,6 +89,11 @@ public class AMWebServices {
     return true;
     return true;
   }
   }
 
 
+  private void init() {
+    //clear content type
+    response.setContentType(null);
+  }
+
   /**
   /**
    * convert a job id string to an actual job and handle all the error checking.
    * convert a job id string to an actual job and handle all the error checking.
    */
    */
@@ -205,6 +213,7 @@ public class AMWebServices {
   @Path("/info")
   @Path("/info")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public AppInfo getAppInfo() {
   public AppInfo getAppInfo() {
+    init();
     return new AppInfo(this.app, this.app.context);
     return new AppInfo(this.app, this.app.context);
   }
   }
 
 
@@ -212,6 +221,7 @@ public class AMWebServices {
   @Path("/jobs")
   @Path("/jobs")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public JobsInfo getJobs(@Context HttpServletRequest hsr) {
   public JobsInfo getJobs(@Context HttpServletRequest hsr) {
+    init();
     JobsInfo allJobs = new JobsInfo();
     JobsInfo allJobs = new JobsInfo();
     for (Job job : appCtx.getAllJobs().values()) {
     for (Job job : appCtx.getAllJobs().values()) {
       // getAllJobs only gives you a partial we want a full
       // getAllJobs only gives you a partial we want a full
@@ -229,6 +239,7 @@ public class AMWebServices {
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public JobInfo getJob(@Context HttpServletRequest hsr,
   public JobInfo getJob(@Context HttpServletRequest hsr,
       @PathParam("jobid") String jid) {
       @PathParam("jobid") String jid) {
+    init();
     Job job = getJobFromJobIdString(jid, appCtx);
     Job job = getJobFromJobIdString(jid, appCtx);
     return new JobInfo(job, hasAccess(job, hsr));
     return new JobInfo(job, hasAccess(job, hsr));
   }
   }
@@ -237,7 +248,7 @@ public class AMWebServices {
   @Path("/jobs/{jobid}/jobattempts")
   @Path("/jobs/{jobid}/jobattempts")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public AMAttemptsInfo getJobAttempts(@PathParam("jobid") String jid) {
   public AMAttemptsInfo getJobAttempts(@PathParam("jobid") String jid) {
-
+    init();
     Job job = getJobFromJobIdString(jid, appCtx);
     Job job = getJobFromJobIdString(jid, appCtx);
     AMAttemptsInfo amAttempts = new AMAttemptsInfo();
     AMAttemptsInfo amAttempts = new AMAttemptsInfo();
     for (AMInfo amInfo : job.getAMInfos()) {
     for (AMInfo amInfo : job.getAMInfos()) {
@@ -253,6 +264,7 @@ public class AMWebServices {
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public JobCounterInfo getJobCounters(@Context HttpServletRequest hsr,
   public JobCounterInfo getJobCounters(@Context HttpServletRequest hsr,
       @PathParam("jobid") String jid) {
       @PathParam("jobid") String jid) {
+    init();
     Job job = getJobFromJobIdString(jid, appCtx);
     Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
     checkAccess(job, hsr);
     return new JobCounterInfo(this.appCtx, job);
     return new JobCounterInfo(this.appCtx, job);
@@ -264,6 +276,7 @@ public class AMWebServices {
   public ConfInfo getJobConf(@Context HttpServletRequest hsr,
   public ConfInfo getJobConf(@Context HttpServletRequest hsr,
       @PathParam("jobid") String jid) {
       @PathParam("jobid") String jid) {
 
 
+    init();
     Job job = getJobFromJobIdString(jid, appCtx);
     Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
     checkAccess(job, hsr);
     ConfInfo info;
     ConfInfo info;
@@ -282,6 +295,7 @@ public class AMWebServices {
   public TasksInfo getJobTasks(@Context HttpServletRequest hsr,
   public TasksInfo getJobTasks(@Context HttpServletRequest hsr,
       @PathParam("jobid") String jid, @QueryParam("type") String type) {
       @PathParam("jobid") String jid, @QueryParam("type") String type) {
 
 
+    init();
     Job job = getJobFromJobIdString(jid, appCtx);
     Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
     checkAccess(job, hsr);
     TasksInfo allTasks = new TasksInfo();
     TasksInfo allTasks = new TasksInfo();
@@ -308,6 +322,7 @@ public class AMWebServices {
   public TaskInfo getJobTask(@Context HttpServletRequest hsr,
   public TaskInfo getJobTask(@Context HttpServletRequest hsr,
       @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
       @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
 
 
+    init();
     Job job = getJobFromJobIdString(jid, appCtx);
     Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
     checkAccess(job, hsr);
     Task task = getTaskFromTaskIdString(tid, job);
     Task task = getTaskFromTaskIdString(tid, job);
@@ -321,6 +336,7 @@ public class AMWebServices {
       @Context HttpServletRequest hsr, @PathParam("jobid") String jid,
       @Context HttpServletRequest hsr, @PathParam("jobid") String jid,
       @PathParam("taskid") String tid) {
       @PathParam("taskid") String tid) {
 
 
+    init();
     Job job = getJobFromJobIdString(jid, appCtx);
     Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
     checkAccess(job, hsr);
     Task task = getTaskFromTaskIdString(tid, job);
     Task task = getTaskFromTaskIdString(tid, job);
@@ -332,8 +348,9 @@ public class AMWebServices {
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public TaskAttemptsInfo getJobTaskAttempts(@Context HttpServletRequest hsr,
   public TaskAttemptsInfo getJobTaskAttempts(@Context HttpServletRequest hsr,
       @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
       @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
-    TaskAttemptsInfo attempts = new TaskAttemptsInfo();
 
 
+    init();
+    TaskAttemptsInfo attempts = new TaskAttemptsInfo();
     Job job = getJobFromJobIdString(jid, appCtx);
     Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
     checkAccess(job, hsr);
     Task task = getTaskFromTaskIdString(tid, job);
     Task task = getTaskFromTaskIdString(tid, job);
@@ -357,6 +374,7 @@ public class AMWebServices {
       @PathParam("jobid") String jid, @PathParam("taskid") String tid,
       @PathParam("jobid") String jid, @PathParam("taskid") String tid,
       @PathParam("attemptid") String attId) {
       @PathParam("attemptid") String attId) {
 
 
+    init();
     Job job = getJobFromJobIdString(jid, appCtx);
     Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
     checkAccess(job, hsr);
     Task task = getTaskFromTaskIdString(tid, job);
     Task task = getTaskFromTaskIdString(tid, job);
@@ -375,6 +393,7 @@ public class AMWebServices {
       @Context HttpServletRequest hsr, @PathParam("jobid") String jid,
       @Context HttpServletRequest hsr, @PathParam("jobid") String jid,
       @PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
       @PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
 
 
+    init();
     Job job = getJobFromJobIdString(jid, appCtx);
     Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
     checkAccess(job, hsr);
     Task task = getTaskFromTaskIdString(tid, job);
     Task task = getTaskFromTaskIdString(tid, job);

+ 2 - 18
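Each init() call added above implements the MAPREDUCE-3613 fix: clear whatever content type is already set on the servlet response so that only the JAX-RS-negotiated type is emitted, rather than two Content-Type values. The same pattern recurs in HsWebServices and NMWebServices further down. A minimal sketch with an invented resource class (not part of this commit):

// Sketch: reset a previously-set content type before Jersey picks one
// from @Produces, so the response carries a single Content-Type header.
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;

@Path("/ws/v1/example")
public class ExampleWebService {
  private @Context HttpServletResponse response;

  @GET
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public String getInfo() {
    response.setContentType(null);    // drop any type set by a filter upstream
    return "{\"status\":\"ok\"}";
  }
}
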
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java

@@ -387,21 +387,13 @@ public class Counters
   private static class FrameworkGroupImpl<T extends Enum<T>>
   private static class FrameworkGroupImpl<T extends Enum<T>>
       extends FrameworkCounterGroup<T, Counter> {
       extends FrameworkCounterGroup<T, Counter> {
 
 
-    // Mix the framework counter implementation into the Counter interface
-    class FrameworkCounterImpl extends FrameworkCounter {
-      FrameworkCounterImpl(T key) {
-        super(key);
-      }
-
-    }
-
     FrameworkGroupImpl(Class<T> cls) {
     FrameworkGroupImpl(Class<T> cls) {
       super(cls);
       super(cls);
     }
     }
 
 
     @Override
     @Override
     protected Counter newCounter(T key) {
     protected Counter newCounter(T key) {
-      return new Counter(new FrameworkCounterImpl(key));
+      return new Counter(new FrameworkCounter<T>(key, getName()));
     }
     }
 
 
     @Override
     @Override
@@ -413,17 +405,9 @@ public class Counters
   // Mix the file system counter group implementation into the Group interface
   // Mix the file system counter group implementation into the Group interface
   private static class FSGroupImpl extends FileSystemCounterGroup<Counter> {
   private static class FSGroupImpl extends FileSystemCounterGroup<Counter> {
 
 
-    private class FSCounterImpl extends FSCounter {
-
-      FSCounterImpl(String scheme, FileSystemCounter key) {
-        super(scheme, key);
-      }
-
-    }
-
     @Override
     @Override
     protected Counter newCounter(String scheme, FileSystemCounter key) {
     protected Counter newCounter(String scheme, FileSystemCounter key) {
-      return new Counter(new FSCounterImpl(scheme, key));
+      return new Counter(new FSCounter(scheme, key));
     }
     }
 
 
     @Override
     @Override

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Counters.java

@@ -49,8 +49,8 @@ public class Counters extends AbstractCounters<Counter, CounterGroup> {
     }
     }
 
 
     @Override
     @Override
-    protected FrameworkCounter newCounter(T key) {
-      return new FrameworkCounter(key);
+    protected FrameworkCounter<T> newCounter(T key) {
+      return new FrameworkCounter<T>(key, getName());
     }
     }
 
 
     @Override
     @Override

+ 2 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java

@@ -61,7 +61,7 @@ public abstract class FileSystemCounterGroup<C extends Counter>
   private static final Joiner DISP_JOINER = Joiner.on(": ");
   private static final Joiner DISP_JOINER = Joiner.on(": ");
 
 
   @InterfaceAudience.Private
   @InterfaceAudience.Private
-  public class FSCounter extends AbstractCounter {
+  public static class FSCounter extends AbstractCounter {
     final String scheme;
     final String scheme;
     final FileSystemCounter key;
     final FileSystemCounter key;
     private long value;
     private long value;
@@ -139,8 +139,7 @@ public abstract class FileSystemCounterGroup<C extends Counter>
   @Override
   @Override
   public void addCounter(C counter) {
   public void addCounter(C counter) {
     C ours;
     C ours;
-    if (counter instanceof FileSystemCounterGroup<?>.FSCounter) {
-      @SuppressWarnings("unchecked")
+    if (counter instanceof FileSystemCounterGroup.FSCounter) {
       FSCounter c = (FSCounter) counter;
       FSCounter c = (FSCounter) counter;
       ours = findCounter(c.scheme, c.key);
       ours = findCounter(c.scheme, c.key);
     }
     }

+ 6 - 8
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java

@@ -57,12 +57,14 @@ public abstract class FrameworkCounterGroup<T extends Enum<T>,
    * Use old (which extends new) interface to make compatibility easier.
    * Use old (which extends new) interface to make compatibility easier.
    */
    */
   @InterfaceAudience.Private
   @InterfaceAudience.Private
-  public class FrameworkCounter extends AbstractCounter {
+  public static class FrameworkCounter<T extends Enum<T>> extends AbstractCounter {
     final T key;
     final T key;
+    final String groupName;
     private long value;
     private long value;
 
 
-    public FrameworkCounter(T ref) {
+    public FrameworkCounter(T ref, String groupName) {
       key = ref;
       key = ref;
+      this.groupName = groupName;
     }
     }
 
 
     @Override
     @Override
@@ -72,7 +74,7 @@ public abstract class FrameworkCounterGroup<T extends Enum<T>,
 
 
     @Override
     @Override
     public String getDisplayName() {
     public String getDisplayName() {
-      return localizeCounterName(getName());
+      return ResourceBundles.getCounterName(groupName, getName(), getName());
     }
     }
 
 
     @Override
     @Override
@@ -131,10 +133,6 @@ public abstract class FrameworkCounterGroup<T extends Enum<T>,
     this.displayName = displayName;
     this.displayName = displayName;
   }
   }
 
 
-    private String localizeCounterName(String counterName) {
-      return ResourceBundles.getCounterName(getName(), counterName, counterName);
-    }
-
   private T valueOf(String name) {
   private T valueOf(String name) {
     return Enum.valueOf(enumClass, name);
     return Enum.valueOf(enumClass, name);
   }
   }
@@ -204,7 +202,7 @@ public abstract class FrameworkCounterGroup<T extends Enum<T>,
     if (checkNotNull(other, "other counter group")
     if (checkNotNull(other, "other counter group")
         instanceof FrameworkCounterGroup<?, ?>) {
         instanceof FrameworkCounterGroup<?, ?>) {
       for (Counter counter : other) {
       for (Counter counter : other) {
-        findCounter(((FrameworkCounter) counter).key)
+        findCounter(((FrameworkCounter) counter).key.name())
             .increment(counter.getValue());
             .increment(counter.getValue());
       }
       }
     }
     }

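For context on the MAPREDUCE-4138 changes above: a non-static nested class holds an implicit reference to its enclosing instance, so every counter object used to pin its whole counter group in memory; making FrameworkCounter and FSCounter static removes that hidden edge, at the cost of passing the group name in explicitly. A hedged, self-contained illustration with invented class names:

// Sketch: the memory difference between an inner and a static nested counter.
public class CounterGroupSketch {
  private final long[] perGroupState = new long[1024];   // stand-in for group data

  // Non-static: every instance implicitly references the enclosing group,
  // keeping perGroupState reachable for as long as the counter lives.
  class InnerCounter {
    long value;
  }

  // Static: no hidden outer reference; context is passed in explicitly,
  // which is the shape FrameworkCounter/FSCounter take after this change.
  static class StaticCounter {
    final String groupName;
    long value;
    StaticCounter(String groupName) { this.groupName = groupName; }
  }

  public static void main(String[] args) {
    CounterGroupSketch group = new CounterGroupSketch();
    InnerCounter inner = group.new InnerCounter();     // retains 'group'
    StaticCounter flat = new StaticCounter("demo");    // does not
    System.out.println(inner.value + " " + flat.value + " " + flat.groupName
        + " " + group.perGroupState.length);
  }
}
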
+ 19 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.v2.hs.webapp;
 
 
 import java.io.IOException;
 import java.io.IOException;
 
 
+import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.GET;
 import javax.ws.rs.GET;
 import javax.ws.rs.Path;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
 import javax.ws.rs.PathParam;
@@ -66,6 +67,7 @@ public class HsWebServices {
   private final HistoryContext ctx;
   private final HistoryContext ctx;
   private WebApp webapp;
   private WebApp webapp;
 
 
+  private @Context HttpServletResponse response;
   @Context
   @Context
   UriInfo uriInfo;
   UriInfo uriInfo;
 
 
@@ -76,6 +78,11 @@ public class HsWebServices {
     this.webapp = webapp;
     this.webapp = webapp;
   }
   }
 
 
+  private void init() {
+    //clear content type
+    response.setContentType(null);
+  }
+
   @GET
   @GET
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public HistoryInfo get() {
   public HistoryInfo get() {
@@ -86,6 +93,7 @@ public class HsWebServices {
   @Path("/info")
   @Path("/info")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public HistoryInfo getHistoryInfo() {
   public HistoryInfo getHistoryInfo() {
+    init();
     return new HistoryInfo();
     return new HistoryInfo();
   }
   }
 
 
@@ -102,6 +110,7 @@ public class HsWebServices {
       @QueryParam("finishedTimeEnd") String finishEnd) {
       @QueryParam("finishedTimeEnd") String finishEnd) {
 
 
     Long countParam = null;
     Long countParam = null;
+    init();
     
     
     if (count != null && !count.isEmpty()) {
     if (count != null && !count.isEmpty()) {
       try {
       try {
@@ -183,6 +192,7 @@ public class HsWebServices {
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public JobInfo getJob(@PathParam("jobid") String jid) {
   public JobInfo getJob(@PathParam("jobid") String jid) {
 
 
+    init();
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     return new JobInfo(job);
     return new JobInfo(job);
   }
   }
@@ -192,6 +202,7 @@ public class HsWebServices {
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public AMAttemptsInfo getJobAttempts(@PathParam("jobid") String jid) {
   public AMAttemptsInfo getJobAttempts(@PathParam("jobid") String jid) {
 
 
+    init();
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     AMAttemptsInfo amAttempts = new AMAttemptsInfo();
     AMAttemptsInfo amAttempts = new AMAttemptsInfo();
     for (AMInfo amInfo : job.getAMInfos()) {
     for (AMInfo amInfo : job.getAMInfos()) {
@@ -208,6 +219,7 @@ public class HsWebServices {
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public JobCounterInfo getJobCounters(@PathParam("jobid") String jid) {
   public JobCounterInfo getJobCounters(@PathParam("jobid") String jid) {
 
 
+    init();
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     return new JobCounterInfo(this.ctx, job);
     return new JobCounterInfo(this.ctx, job);
   }
   }
@@ -217,6 +229,7 @@ public class HsWebServices {
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public ConfInfo getJobConf(@PathParam("jobid") String jid) {
   public ConfInfo getJobConf(@PathParam("jobid") String jid) {
 
 
+    init();
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     ConfInfo info;
     ConfInfo info;
     try {
     try {
@@ -234,6 +247,7 @@ public class HsWebServices {
   public TasksInfo getJobTasks(@PathParam("jobid") String jid,
   public TasksInfo getJobTasks(@PathParam("jobid") String jid,
       @QueryParam("type") String type) {
       @QueryParam("type") String type) {
 
 
+    init();
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     TasksInfo allTasks = new TasksInfo();
     TasksInfo allTasks = new TasksInfo();
     for (Task task : job.getTasks().values()) {
     for (Task task : job.getTasks().values()) {
@@ -259,6 +273,7 @@ public class HsWebServices {
   public TaskInfo getJobTask(@PathParam("jobid") String jid,
   public TaskInfo getJobTask(@PathParam("jobid") String jid,
       @PathParam("taskid") String tid) {
       @PathParam("taskid") String tid) {
 
 
+    init();
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
     Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
     return new TaskInfo(task);
     return new TaskInfo(task);
@@ -271,6 +286,7 @@ public class HsWebServices {
   public JobTaskCounterInfo getSingleTaskCounters(
   public JobTaskCounterInfo getSingleTaskCounters(
       @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
       @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
 
 
+    init();
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     TaskId taskID = MRApps.toTaskID(tid);
     TaskId taskID = MRApps.toTaskID(tid);
     if (taskID == null) {
     if (taskID == null) {
@@ -289,6 +305,7 @@ public class HsWebServices {
   public TaskAttemptsInfo getJobTaskAttempts(@PathParam("jobid") String jid,
   public TaskAttemptsInfo getJobTaskAttempts(@PathParam("jobid") String jid,
       @PathParam("taskid") String tid) {
       @PathParam("taskid") String tid) {
 
 
+    init();
     TaskAttemptsInfo attempts = new TaskAttemptsInfo();
     TaskAttemptsInfo attempts = new TaskAttemptsInfo();
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
     Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
@@ -310,6 +327,7 @@ public class HsWebServices {
   public TaskAttemptInfo getJobTaskAttemptId(@PathParam("jobid") String jid,
   public TaskAttemptInfo getJobTaskAttemptId(@PathParam("jobid") String jid,
       @PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
       @PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
 
 
+    init();
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
     Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
     TaskAttempt ta = AMWebServices.getTaskAttemptFromTaskAttemptString(attId,
     TaskAttempt ta = AMWebServices.getTaskAttemptFromTaskAttemptString(attId,
@@ -328,6 +346,7 @@ public class HsWebServices {
       @PathParam("jobid") String jid, @PathParam("taskid") String tid,
       @PathParam("jobid") String jid, @PathParam("taskid") String tid,
       @PathParam("attemptid") String attId) {
       @PathParam("attemptid") String attId) {
 
 
+    init();
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
     Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
     Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
     TaskAttempt ta = AMWebServices.getTaskAttemptFromTaskAttemptString(attId,
     TaskAttempt ta = AMWebServices.getTaskAttemptFromTaskAttemptString(attId,

+ 1 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml

@@ -124,6 +124,7 @@
     <dependency>
     <dependency>
       <groupId>junit</groupId>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <artifactId>junit</artifactId>
+      <scope>test</scope>
     </dependency>
     </dependency>
     <dependency>
     <dependency>
       <groupId>org.jboss.netty</groupId>
       <groupId>org.jboss.netty</groupId>

+ 1 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml

@@ -78,6 +78,7 @@
             <configuration>
             <configuration>
               <tasks>
               <tasks>
                 <copy file="src/main/resources/yarn-default.xml" todir="src/site/resources"/>
                 <copy file="src/main/resources/yarn-default.xml" todir="src/site/resources"/>
+                <copy file="src/main/xsl/configuration.xsl" todir="src/site/resources"/>
               </tasks>
               </tasks>
             </configuration>
             </configuration>
           </execution>
           </execution>

+ 11 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java

@@ -29,6 +29,9 @@ import java.io.InputStreamReader;
 import java.io.IOException;
 import java.io.IOException;
 import java.io.Writer;
 import java.io.Writer;
 import java.security.PrivilegedExceptionAction;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashMap;
 import java.util.List;
 import java.util.List;
@@ -112,8 +115,11 @@ public class AggregatedLogFormat {
     // the entire k-v format
     // the entire k-v format
 
 
     public LogValue(List<String> rootLogDirs, ContainerId containerId) {
     public LogValue(List<String> rootLogDirs, ContainerId containerId) {
-      this.rootLogDirs = rootLogDirs;
+      this.rootLogDirs = new ArrayList<String>(rootLogDirs);
       this.containerId = containerId;
       this.containerId = containerId;
+
+      // Ensure logs are processed in lexical order
+      Collections.sort(this.rootLogDirs);
     }
     }
 
 
     public void write(DataOutputStream out) throws IOException {
     public void write(DataOutputStream out) throws IOException {
@@ -131,7 +137,10 @@ public class AggregatedLogFormat {
           continue; // ContainerDir may have been deleted by the user.
           continue; // ContainerDir may have been deleted by the user.
         }
         }
 
 
-        for (File logFile : containerLogDir.listFiles()) {
+        // Write out log files in lexical order
+        File[] logFiles = containerLogDir.listFiles();
+        Arrays.sort(logFiles);
+        for (File logFile : logFiles) {
 
 
           // Write the logFile Type
           // Write the logFile Type
           out.writeUTF(logFile.getName());
           out.writeUTF(logFile.getName());

+ 33 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml

@@ -277,6 +277,26 @@
     <value>4</value>
     <value>4</value>
   </property>
   </property>
 
 
+  <property>
+    <description>
+      Number of seconds after an application finishes before the nodemanager's 
+      DeletionService will delete the application's localized file directory
+      and log directory.
+      
+      To diagnose Yarn application problems, set this property's value large
+      enough (for example, to 600 = 10 minutes) to permit examination of these
+      directories. After changing the property's value, you must restart the 
+      nodemanager in order for it to have an effect.
+
+      The roots of Yarn applications' work directories are configurable with
+      the yarn.nodemanager.local-dirs property (see below), and the roots
+      of the Yarn applications' log directories are configurable with the
+      yarn.nodemanager.log-dirs property (see also below).
+    </description>
+    <name>yarn.nodemanager.delete.debug-delay-sec</name>
+    <value>0</value>
+  </property>
+
   <property>
   <property>
     <description>Heartbeat interval to RM</description>
     <description>Heartbeat interval to RM</description>
     <name>yarn.nodemanager.heartbeat.interval-ms</name>
     <name>yarn.nodemanager.heartbeat.interval-ms</name>
@@ -290,7 +310,12 @@
   </property>
   </property>
 
 
   <property>
   <property>
-    <description>List of directories to store localized files in.</description>
+    <description>List of directories to store localized files in. An 
+      application's localized file directory will be found in:
+      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
+      Individual containers' work directories, called container_${contid}, will
+      be subdirectories of this.
+   </description>
     <name>yarn.nodemanager.local-dirs</name>
     <name>yarn.nodemanager.local-dirs</name>
     <value>/tmp/nm-local-dir</value>
     <value>/tmp/nm-local-dir</value>
   </property>
   </property>
@@ -326,7 +351,13 @@
   </property>
   </property>
 
 
   <property>
   <property>
-    <description>Where to store container logs.</description>
+    <description>
+      Where to store container logs. An application's localized log directory 
+      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
+      Individual containers' log directories will be below this, in directories 
+      named container_${contid}. Each container directory will contain the files
+      stderr, stdout, and syslog generated by that container.
+    </description>
     <name>yarn.nodemanager.log-dirs</name>
     <name>yarn.nodemanager.log-dirs</name>
     <value>/tmp/logs</value>
     <value>/tmp/logs</value>
   </property>
   </property>

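To actually use the newly documented delay, an operator would set the property in yarn-site.xml and restart the NodeManager; purely as an illustrative sketch (the 600-second value is an example, not a recommendation):

// Sketch: keep localized files and container logs for 10 minutes after an
// application finishes, so they can be inspected on the NodeManager host.
import org.apache.hadoop.conf.Configuration;

public class DebugDelaySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("yarn.nodemanager.delete.debug-delay-sec", 600);
    System.out.println(conf.get("yarn.nodemanager.delete.debug-delay-sec"));
    // In a real deployment this belongs in yarn-site.xml; the default of 0
    // deletes the directories as soon as the application completes.
  }
}
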
+ 37 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/xsl/configuration.xsl

@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+<tr>
+  <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+  <td><xsl:value-of select="value"/></td>
+  <td><xsl:value-of select="description"/></td>
+</tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>

+ 9 - 16
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java

@@ -19,10 +19,9 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 package org.apache.hadoop.yarn.server.nodemanager;
 
 
 import java.io.File;
 import java.io.File;
-import java.util.ArrayList;
-import java.util.Arrays;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.List;
-import java.util.ListIterator;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
@@ -41,23 +40,22 @@ class DirectoryCollection {
   private int numFailures;
   private int numFailures;
 
 
   public DirectoryCollection(String[] dirs) {
   public DirectoryCollection(String[] dirs) {
-    localDirs = new ArrayList<String>();
-    localDirs.addAll(Arrays.asList(dirs));
-    failedDirs = new ArrayList<String>();
+    localDirs = new CopyOnWriteArrayList<String>(dirs);
+    failedDirs = new CopyOnWriteArrayList<String>();
   }
   }
 
 
   /**
   /**
    * @return the current valid directories 
    * @return the current valid directories 
    */
    */
   synchronized List<String> getGoodDirs() {
   synchronized List<String> getGoodDirs() {
-    return localDirs;
+    return Collections.unmodifiableList(localDirs);
   }
   }
 
 
   /**
   /**
    * @return the failed directories
    * @return the failed directories
    */
    */
   synchronized List<String> getFailedDirs() {
   synchronized List<String> getFailedDirs() {
-    return failedDirs;
+    return Collections.unmodifiableList(failedDirs);
   }
   }
 
 
   /**
   /**
@@ -75,22 +73,17 @@ class DirectoryCollection {
    */
    */
   synchronized boolean checkDirs() {
   synchronized boolean checkDirs() {
     int oldNumFailures = numFailures;
     int oldNumFailures = numFailures;
-    ListIterator<String> it = localDirs.listIterator();
-    while (it.hasNext()) {
-      final String dir = it.next();
+    for (final String dir : localDirs) {
       try {
       try {
         DiskChecker.checkDir(new File(dir));
         DiskChecker.checkDir(new File(dir));
       } catch (DiskErrorException de) {
       } catch (DiskErrorException de) {
         LOG.warn("Directory " + dir + " error " +
         LOG.warn("Directory " + dir + " error " +
             de.getMessage() + ", removing from the list of valid directories.");
             de.getMessage() + ", removing from the list of valid directories.");
-        it.remove();
+        localDirs.remove(dir);
         failedDirs.add(dir);
         failedDirs.add(dir);
         numFailures++;
         numFailures++;
       }
       }
     }
     }
-    if (numFailures > oldNumFailures) {
-      return true;
-    }
-    return false;
+    return numFailures > oldNumFailures;
   }
   }
 }
 }

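The switch to CopyOnWriteArrayList is what lets checkDirs() drop a failed directory while iterating without the ConcurrentModificationError reported in MAPREDUCE-4194: the enhanced-for loop walks a snapshot, so remove() on the list itself is safe. A standalone sketch of that pattern with placeholder paths:

// Sketch: removing elements from a CopyOnWriteArrayList during iteration.
// An ArrayList would throw ConcurrentModificationException here.
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

public class SafeRemovalSketch {
  public static void main(String[] args) {
    List<String> goodDirs =
        new CopyOnWriteArrayList<String>(new String[] {"/data/1", "/data/2", "/data/3"});
    List<String> failedDirs = new CopyOnWriteArrayList<String>();

    for (String dir : goodDirs) {
      if (dir.endsWith("2")) {        // stand-in for DiskChecker.checkDir() failing
        goodDirs.remove(dir);         // safe: the loop iterates over a snapshot
        failedDirs.add(dir);
      }
    }
    System.out.println("good=" + goodDirs + " failed=" + failedDirs);
  }
}
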
+ 7 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java

@@ -30,6 +30,8 @@ import java.io.FileReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 
@@ -269,12 +271,15 @@ public class ContainerLogsPage extends NMView {
           }
         }
       } else {
-        // Just print out the log-types
+        // Print out log types in lexical order
         List<File> containerLogsDirs = getContainerLogDirs(containerId,
             dirsHandler);
+        Collections.sort(containerLogsDirs);
         boolean foundLogFile = false;
         for (File containerLogsDir : containerLogsDirs) {
-          for (File logFile : containerLogsDir.listFiles()) {
+          File[] logFiles = containerLogsDir.listFiles();
+          Arrays.sort(logFiles);
+          for (File logFile : logFiles) {
             foundLogFile = true;
             html.p()
                 .a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER), 

+ 15 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.nodemanager.webapp;
 
 import java.util.Map.Entry;
 
+import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.GET;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
@@ -58,8 +59,11 @@ public class NMWebServices {
   private static RecordFactory recordFactory = RecordFactoryProvider
       .getRecordFactory(null);
 
+  private @javax.ws.rs.core.Context
+    HttpServletResponse response;
+
   @javax.ws.rs.core.Context
-  UriInfo uriInfo;
+    UriInfo uriInfo;
 
   @Inject
   public NMWebServices(final Context nm, final ResourceView view,
@@ -69,6 +73,11 @@ public class NMWebServices {
     this.webapp = webapp;
   }
 
+  private void init() {
+    //clear content type
+    response.setContentType(null);
+  }
+
   @GET
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public NodeInfo get() {
@@ -79,6 +88,7 @@
   @Path("/info")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public NodeInfo getNodeInfo() {
+    init();
     return new NodeInfo(this.nmContext, this.rview);
   }
 
@@ -87,6 +97,7 @@
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public AppsInfo getNodeApps(@QueryParam("state") String stateQuery,
       @QueryParam("user") String userQuery) {
+    init();
     AppsInfo allApps = new AppsInfo();
     for (Entry<ApplicationId, Application> entry : this.nmContext
         .getApplications().entrySet()) {
@@ -116,6 +127,7 @@
   @Path("/apps/{appid}")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public AppInfo getNodeApp(@PathParam("appid") String appId) {
+    init();
     ApplicationId id = ConverterUtils.toApplicationId(recordFactory, appId);
     if (id == null) {
       throw new NotFoundException("app with id " + appId + " not found");
@@ -132,6 +144,7 @@
   @Path("/containers")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public ContainersInfo getNodeContainers() {
+    init();
     ContainersInfo allContainers = new ContainersInfo();
     for (Entry<ContainerId, Container> entry : this.nmContext.getContainers()
         .entrySet()) {
@@ -151,6 +164,7 @@
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public ContainerInfo getNodeContainer(@PathParam("containerid") String id) {
     ContainerId containerId = null;
+    init();
     try {
       containerId = ConverterUtils.toContainerId(id);
     } catch (Exception e) {
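The pattern added to NMWebServices (and to RMWebServices further down) is a field-injected HttpServletResponse whose content type is reset at the start of each handler, so the JAX-RS runtime re-derives it from the @Produces list during content negotiation instead of inheriting whatever a servlet filter set earlier. A hedged, stand-alone sketch of the same idiom; ExampleWebService and its path are invented for illustration:

import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;

@Path("/example")
public class ExampleWebService {

  // Injected by the JAX-RS runtime for the current request.
  private @Context HttpServletResponse response;

  private void init() {
    // Clear any previously set content type so the framework negotiates
    // JSON or XML from the @Produces declaration below.
    response.setContentType(null);
  }

  @GET
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public String get() {
    init();
    return "ok";
  }
}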

+ 68 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java

@@ -0,0 +1,68 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.ListIterator;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestDirectoryCollection {
+
+  private static final File testDir = new File("target",
+      TestDirectoryCollection.class.getName()).getAbsoluteFile();
+  private static final File testFile = new File(testDir, "testfile");
+
+  @BeforeClass
+  public static void setup() throws IOException {
+    testDir.mkdirs();
+    testFile.createNewFile();
+  }
+
+  @AfterClass
+  public static void teardown() {
+    FileUtil.fullyDelete(testDir);
+  }
+
+  @Test
+  public void testConcurrentAccess() throws IOException {
+    // Initialize DirectoryCollection with a file instead of a directory
+    String[] dirs = {testFile.getPath()};
+    DirectoryCollection dc = new DirectoryCollection(dirs);
+
+    // Create an iterator before checkDirs is called to reliably test this case
+    List<String> list = dc.getGoodDirs();
+    ListIterator<String> li = list.listIterator();
+
+    // DiskErrorException will invalidate iterator of non-concurrent
+    // collections. ConcurrentModificationException will be thrown upon next
+    // use of the iterator.
+    Assert.assertTrue("checkDirs did not remove test file from directory list",
+        dc.checkDirs());
+
+    // Verify no ConcurrentModification is thrown
+    li.next();
+  }
+}

+ 8 - 5
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java

@@ -121,7 +121,8 @@ class NodesPage extends RmView {
           row.td().a("http://" + httpAddress, httpAddress)._();
           row.td().a("http://" + httpAddress, httpAddress)._();
         }
         }
         row.td(info.getHealthStatus()).
         row.td(info.getHealthStatus()).
-            td(Times.format(info.getLastHealthUpdate())).
+            td().br().$title(String.valueOf(info.getLastHealthUpdate()))._().
+              _(Times.format(info.getLastHealthUpdate()))._().
             td(info.getHealthReport()).
             td(info.getHealthReport()).
             td(String.valueOf(info.getNumContainers())).
             td(String.valueOf(info.getNumContainers())).
             td().br().$title(String.valueOf(usedMemory))._().
             td().br().$title(String.valueOf(usedMemory))._().
@@ -153,10 +154,12 @@ class NodesPage extends RmView {
   }
   }
 
 
   private String nodesTableInit() {
   private String nodesTableInit() {
-    StringBuilder b = tableInit().append(",aoColumnDefs:[");
-    b.append("{'bSearchable':false, 'aTargets': [7]} ,");
-    b.append("{'sType':'title-numeric', 'bSearchable':false, " +
-    		"'aTargets': [ 8, 9] }]}");
+    StringBuilder b = tableInit().append(", aoColumnDefs: [");
+    b.append("{'bSearchable': false, 'aTargets': [ 7 ]}");
+    b.append(", {'sType': 'title-numeric', 'bSearchable': false, " +
+    		"'aTargets': [ 8, 9 ] }");
+    b.append(", {'sType': 'title-numeric', 'aTargets': [ 5 ]}");
+    b.append("]}");
     return b.toString();
     return b.toString();
   }
   }
 }
 }

+ 15 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java

@@ -23,6 +23,7 @@ import java.util.Collection;
 import java.util.concurrent.ConcurrentMap;
 
 import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.GET;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
@@ -77,6 +78,8 @@ public class RMWebServices {
       .getRecordFactory(null);
   private final ApplicationACLsManager aclsManager;
 
+  private @Context HttpServletResponse response;
+
   @Inject
   public RMWebServices(final ResourceManager rm,
       final ApplicationACLsManager aclsManager) {
@@ -100,6 +103,11 @@
     return true;
   }
 
+  private void init() {
+    //clear content type
+    response.setContentType(null);
+  }
+
   @GET
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public ClusterInfo get() {
@@ -110,6 +118,7 @@
   @Path("/info")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public ClusterInfo getClusterInfo() {
+    init();
     return new ClusterInfo(this.rm);
   }
 
@@ -117,6 +126,7 @@
   @Path("/metrics")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public ClusterMetricsInfo getClusterMetricsInfo() {
+    init();
     return new ClusterMetricsInfo(this.rm, this.rm.getRMContext());
   }
 
@@ -124,6 +134,7 @@
   @Path("/scheduler")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public SchedulerTypeInfo getSchedulerInfo() {
+    init();
     ResourceScheduler rs = rm.getResourceScheduler();
     SchedulerInfo sinfo;
     if (rs instanceof CapacityScheduler) {
@@ -143,6 +154,7 @@
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public NodesInfo getNodes(@QueryParam("state") String filterState,
       @QueryParam("healthy") String healthState) {
+    init();
     ResourceScheduler sched = this.rm.getResourceScheduler();
     if (sched == null) {
       throw new NotFoundException("Null ResourceScheduler instance");
@@ -197,6 +209,7 @@
   @Path("/nodes/{nodeId}")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public NodeInfo getNode(@PathParam("nodeId") String nodeId) {
+    init();
     if (nodeId == null || nodeId.isEmpty()) {
       throw new NotFoundException("nodeId, " + nodeId + ", is empty or null");
     }
@@ -246,6 +259,7 @@
     long fBegin = 0;
     long fEnd = Long.MAX_VALUE;
 
+    init();
     if (count != null && !count.isEmpty()) {
       checkCount = true;
       countNum = Long.parseLong(count);
@@ -355,6 +369,7 @@
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public AppInfo getApp(@Context HttpServletRequest hsr,
       @PathParam("appid") String appId) {
+    init();
     if (appId == null || appId.isEmpty()) {
       throw new NotFoundException("appId, " + appId + ", is empty or null");
     }

+ 3 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java

@@ -47,6 +47,7 @@ import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.AMRMProtocol;
@@ -401,7 +402,8 @@ public class TestContainerManagerSecurity {
     appToken.setService(new Text(schedulerAddr.getHostName() + ":"
         + schedulerAddr.getPort()));
     currentUser.addToken(appToken);
-
+    SecurityUtil.setTokenService(appToken, schedulerAddr);
+
     AMRMProtocol scheduler = currentUser
         .doAs(new PrivilegedAction<AMRMProtocol>() {
           @Override

+ 1 - 1
hadoop-project/src/site/site.xml

@@ -95,7 +95,7 @@
     <menu name="Configuration" inherit="top">
     <menu name="Configuration" inherit="top">
       <item name="core-default.xml" href="hadoop-project-dist/hadoop-common/core-default.xml"/>
       <item name="core-default.xml" href="hadoop-project-dist/hadoop-common/core-default.xml"/>
       <item name="hdfs-default.xml" href="hadoop-project-dist/hadoop-hdfs/hdfs-default.xml"/>
       <item name="hdfs-default.xml" href="hadoop-project-dist/hadoop-hdfs/hdfs-default.xml"/>
-      <item name="yarn-default.xml" href="hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml"/>
+      <item name="yarn-default.xml" href="hadoop-yarn/hadoop-yarn-common/yarn-default.xml"/>
       <item name="mapred-default.xml" href="hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml"/>
       <item name="mapred-default.xml" href="hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml"/>
       <item name="Deprecated Properties" href="hadoop-project-dist/hadoop-common/DeprecatedProperties.html"/>
       <item name="Deprecated Properties" href="hadoop-project-dist/hadoop-common/DeprecatedProperties.html"/>
     </menu>
     </menu>

+ 1 - 1
hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java

@@ -613,7 +613,7 @@ public class HadoopArchives implements Tool {
           destFs.delete(tmpOutput, false);
         } 
         partStream = destFs.create(tmpOutput, false, conf.getInt("io.file.buffer.size", 4096), 
-            destFs.getDefaultReplication(), blockSize);
+            destFs.getDefaultReplication(tmpOutput), blockSize);
       } catch(IOException ie) {
         throw new RuntimeException("Unable to open output file " + tmpOutput, ie);
       }
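The HadoopArchives change above and the distcp hunks that follow all move from the parameterless FileSystem.getDefaultReplication()/getDefaultBlockSize() to the Path-taking overloads, which resolve the defaults against the file system that actually backs the given path; on viewfs or federated mounts the two can differ. A brief sketch of the call pattern, assuming a plain Hadoop Configuration and any writable path; the path name is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathAwareDefaults {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path target = new Path("target/tmp/sample");
    // Defaults are resolved for the file system backing 'target',
    // not for whatever fs.defaultFS happens to be.
    short replication = fs.getDefaultReplication(target);
    long blockSize = fs.getDefaultBlockSize(target);
    System.out.println("replication=" + replication + ", blockSize=" + blockSize);
  }
}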

+ 6 - 6
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java

@@ -107,8 +107,8 @@ public class RetriableFileCopyCommand extends RetriableCommand {
                              throws IOException {
     OutputStream outStream = new BufferedOutputStream(targetFS.create(
             tmpTargetPath, true, BUFFER_SIZE,
-            getReplicationFactor(fileAttributes, sourceFileStatus, targetFS),
-            getBlockSize(fileAttributes, sourceFileStatus, targetFS), context));
+            getReplicationFactor(fileAttributes, sourceFileStatus, targetFS, tmpTargetPath),
+            getBlockSize(fileAttributes, sourceFileStatus, targetFS, tmpTargetPath), context));
     return copyBytes(sourceFileStatus, outStream, BUFFER_SIZE, true, context);
   }
 
@@ -218,16 +218,16 @@ public class RetriableFileCopyCommand extends RetriableCommand {
 
   private static short getReplicationFactor(
           EnumSet<FileAttribute> fileAttributes,
-          FileStatus sourceFile, FileSystem targetFS) {
+          FileStatus sourceFile, FileSystem targetFS, Path tmpTargetPath) {
     return fileAttributes.contains(FileAttribute.REPLICATION)?
-            sourceFile.getReplication() : targetFS.getDefaultReplication();
+            sourceFile.getReplication() : targetFS.getDefaultReplication(tmpTargetPath);
   }
 
   private static long getBlockSize(
           EnumSet<FileAttribute> fileAttributes,
-          FileStatus sourceFile, FileSystem targetFS) {
+          FileStatus sourceFile, FileSystem targetFS, Path tmpTargetPath) {
     return fileAttributes.contains(FileAttribute.BLOCKSIZE)?
-            sourceFile.getBlockSize() : targetFS.getDefaultBlockSize();
+            sourceFile.getBlockSize() : targetFS.getDefaultBlockSize(tmpTargetPath);
   }
 
   /**

+ 2 - 2
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCp.java

@@ -110,9 +110,9 @@ public class TestDistCp {
       fs = cluster.getFileSystem();
       final Path qualifiedPath = new Path(path).makeQualified(fs.getUri(),
                                             fs.getWorkingDirectory());
-      final long blockSize = fs.getDefaultBlockSize() * 2;
+      final long blockSize = fs.getDefaultBlockSize(new Path(path)) * 2;
       outputStream = fs.create(qualifiedPath, true, 0,
-              (short)(fs.getDefaultReplication()*2),
+              (short)(fs.getDefaultReplication(new Path(path))*2),
               blockSize);
       outputStream.write(new byte[FILE_SIZE]);
       pathList.add(qualifiedPath);

+ 485 - 0
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpViewFs.java

@@ -0,0 +1,485 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools;
+
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.fs.viewfs.*;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.tools.util.TestDistCpUtils;
+import org.apache.hadoop.fs.FsConstants;
+
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+public class TestDistCpViewFs {
+  private static final Log LOG = LogFactory.getLog(TestDistCpViewFs.class);
+
+  private static FileSystem fs;
+
+  private static Path listFile;
+  private static Path target;
+  private static String root;
+
+  private static Configuration getConf() throws URISyntaxException {
+    Configuration conf = new Configuration();
+    conf.set("mapred.job.tracker", "local");
+    conf.set("fs.default.name", "file:///");
+    return conf;
+  }
+
+  @BeforeClass
+  public static void setup() throws URISyntaxException{
+    try {
+      Path fswd = FileSystem.get(getConf()).getWorkingDirectory();
+      Configuration vConf = ViewFileSystemTestSetup.createConfig();
+      ConfigUtil.addLink(vConf, "/usr", new URI(fswd.toString()));
+      fs = FileSystem.get(FsConstants.VIEWFS_URI, vConf);
+      fs.setWorkingDirectory(new Path("/usr"));
+      listFile = new Path("target/tmp/listing").makeQualified(fs.getUri(),
+              fs.getWorkingDirectory());
+      target = new Path("target/tmp/target").makeQualified(fs.getUri(),
+              fs.getWorkingDirectory());
+      root = new Path("target/tmp").makeQualified(fs.getUri(),
+              fs.getWorkingDirectory()).toString();
+      TestDistCpUtils.delete(fs, root);
+    } catch (IOException e) {
+      LOG.error("Exception encountered ", e);
+    }
+  }
+
+  @Test
+  public void testSingleFileMissingTarget() {
+    caseSingleFileMissingTarget(false);
+    caseSingleFileMissingTarget(true);
+  }
+
+
+  private void caseSingleFileMissingTarget(boolean sync) {
+
+    try {
+      addEntries(listFile, "singlefile1/file1");
+      createFiles("singlefile1/file1");
+
+      runTest(listFile, target, sync);
+
+      checkResult(target, 1);
+    } catch (IOException e) {
+      LOG.error("Exception encountered while testing distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+    }
+  }
+
+  @Test
+  public void testSingleFileTargetFile() {
+    caseSingleFileTargetFile(false);
+    caseSingleFileTargetFile(true);
+  }
+
+  private void caseSingleFileTargetFile(boolean sync) {
+
+    try {
+      addEntries(listFile, "singlefile1/file1");
+      createFiles("singlefile1/file1", target.toString());
+
+      runTest(listFile, target, sync);
+
+      checkResult(target, 1);
+    } catch (IOException e) {
+      LOG.error("Exception encountered while testing distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+    }
+  }
+
+  @Test
+  public void testSingleFileTargetDir() {
+    caseSingleFileTargetDir(false);
+    caseSingleFileTargetDir(true);
+  }
+
+  private void caseSingleFileTargetDir(boolean sync) {
+
+    try {
+      addEntries(listFile, "singlefile2/file2");
+      createFiles("singlefile2/file2");
+      mkdirs(target.toString());
+
+      runTest(listFile, target, sync);
+
+      checkResult(target, 1, "file2");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while testing distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+    }
+  }
+
+  @Test
+  public void testSingleDirTargetMissing() {
+    caseSingleDirTargetMissing(false);
+    caseSingleDirTargetMissing(true);
+  }
+
+  private void caseSingleDirTargetMissing(boolean sync) {
+
+    try {
+      addEntries(listFile, "singledir");
+      mkdirs(root + "/singledir/dir1");
+
+      runTest(listFile, target, sync);
+
+      checkResult(target, 1, "dir1");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while testing distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+    }
+  }
+
+  @Test
+  public void testSingleDirTargetPresent() {
+
+    try {
+      addEntries(listFile, "singledir");
+      mkdirs(root + "/singledir/dir1");
+      mkdirs(target.toString());
+
+      runTest(listFile, target, false);
+
+      checkResult(target, 1, "singledir/dir1");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while testing distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+    }
+  }
+
+  @Test
+  public void testUpdateSingleDirTargetPresent() {
+
+    try {
+      addEntries(listFile, "Usingledir");
+      mkdirs(root + "/Usingledir/Udir1");
+      mkdirs(target.toString());
+
+      runTest(listFile, target, true);
+
+      checkResult(target, 1, "Udir1");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while testing distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+    }
+  }
+
+  @Test
+  public void testMultiFileTargetPresent() {
+    caseMultiFileTargetPresent(false);
+    caseMultiFileTargetPresent(true);
+  }
+
+  private void caseMultiFileTargetPresent(boolean sync) {
+
+    try {
+      addEntries(listFile, "multifile/file3", "multifile/file4", "multifile/file5");
+      createFiles("multifile/file3", "multifile/file4", "multifile/file5");
+      mkdirs(target.toString());
+
+      runTest(listFile, target, sync);
+
+      checkResult(target, 3, "file3", "file4", "file5");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while testing distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+    }
+  }
+
+  @Test
+  public void testMultiFileTargetMissing() {
+    caseMultiFileTargetMissing(false);
+    caseMultiFileTargetMissing(true);
+  }
+
+  private void caseMultiFileTargetMissing(boolean sync) {
+
+    try {
+      addEntries(listFile, "multifile/file3", "multifile/file4", "multifile/file5");
+      createFiles("multifile/file3", "multifile/file4", "multifile/file5");
+
+      runTest(listFile, target, sync);
+
+      checkResult(target, 3, "file3", "file4", "file5");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while testing distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+    }
+  }
+
+  @Test
+  public void testMultiDirTargetPresent() {
+
+    try {
+      addEntries(listFile, "multifile", "singledir");
+      createFiles("multifile/file3", "multifile/file4", "multifile/file5");
+      mkdirs(target.toString(), root + "/singledir/dir1");
+
+      runTest(listFile, target, false);
+
+      checkResult(target, 2, "multifile/file3", "multifile/file4", "multifile/file5", "singledir/dir1");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while testing distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+    }
+  }
+
+  @Test
+  public void testUpdateMultiDirTargetPresent() {
+
+    try {
+      addEntries(listFile, "Umultifile", "Usingledir");
+      createFiles("Umultifile/Ufile3", "Umultifile/Ufile4", "Umultifile/Ufile5");
+      mkdirs(target.toString(), root + "/Usingledir/Udir1");
+
+      runTest(listFile, target, true);
+
+      checkResult(target, 4, "Ufile3", "Ufile4", "Ufile5", "Udir1");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while testing distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+    }
+  }
+
+  @Test
+  public void testMultiDirTargetMissing() {
+
+    try {
+      addEntries(listFile, "multifile", "singledir");
+      createFiles("multifile/file3", "multifile/file4", "multifile/file5");
+      mkdirs(root + "/singledir/dir1");
+
+      runTest(listFile, target, false);
+
+      checkResult(target, 2, "multifile/file3", "multifile/file4",
+          "multifile/file5", "singledir/dir1");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while testing distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+    }
+  }
+
+  @Test
+  public void testUpdateMultiDirTargetMissing() {
+
+    try {
+      addEntries(listFile, "multifile", "singledir");
+      createFiles("multifile/file3", "multifile/file4", "multifile/file5");
+      mkdirs(root + "/singledir/dir1");
+
+      runTest(listFile, target, true);
+
+      checkResult(target, 4, "file3", "file4", "file5", "dir1");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while testing distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+    }
+  }
+
+  @Test
+  public void testGlobTargetMissingSingleLevel() {
+
+    try {
+      Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(),
+                                fs.getWorkingDirectory());
+      addEntries(listFile, "*");
+      createFiles("multifile/file3", "multifile/file4", "multifile/file5");
+      createFiles("singledir/dir2/file6");
+
+      runTest(listFile, target, false);
+
+      checkResult(target, 2, "multifile/file3", "multifile/file4", "multifile/file5",
+          "singledir/dir2/file6");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while testing distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+      TestDistCpUtils.delete(fs, "target/tmp1");
+    }
+  }
+
+  @Test
+  public void testUpdateGlobTargetMissingSingleLevel() {
+
+    try {
+      Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(),
+                                  fs.getWorkingDirectory());
+      addEntries(listFile, "*");
+      createFiles("multifile/file3", "multifile/file4", "multifile/file5");
+      createFiles("singledir/dir2/file6");
+
+      runTest(listFile, target, true);
+
+      checkResult(target, 4, "file3", "file4", "file5", "dir2/file6");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while running distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+      TestDistCpUtils.delete(fs, "target/tmp1");
+    }
+  }
+
+  @Test
+  public void testGlobTargetMissingMultiLevel() {
+
+    try {
+      Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(),
+              fs.getWorkingDirectory());
+      addEntries(listFile, "*/*");
+      createFiles("multifile/file3", "multifile/file4", "multifile/file5");
+      createFiles("singledir1/dir3/file7", "singledir1/dir3/file8",
+          "singledir1/dir3/file9");
+
+      runTest(listFile, target, false);
+
+      checkResult(target, 4, "file3", "file4", "file5",
+          "dir3/file7", "dir3/file8", "dir3/file9");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while running distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+      TestDistCpUtils.delete(fs, "target/tmp1");
+    }
+  }
+
+  @Test
+  public void testUpdateGlobTargetMissingMultiLevel() {
+
+    try {
+      Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(),
+              fs.getWorkingDirectory());
+      addEntries(listFile, "*/*");
+      createFiles("multifile/file3", "multifile/file4", "multifile/file5");
+      createFiles("singledir1/dir3/file7", "singledir1/dir3/file8",
+          "singledir1/dir3/file9");
+
+      runTest(listFile, target, true);
+
+      checkResult(target, 6, "file3", "file4", "file5",
+          "file7", "file8", "file9");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while running distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+      TestDistCpUtils.delete(fs, "target/tmp1");
+    }
+  }
+
+  private void addEntries(Path listFile, String... entries) throws IOException {
+    OutputStream out = fs.create(listFile);
+    try {
+      for (String entry : entries){
+        out.write((root + "/" + entry).getBytes());
+        out.write("\n".getBytes());
+      }
+    } finally {
+      out.close();
+    }
+  }
+
+  private void createFiles(String... entries) throws IOException {
+    String e;
+    for (String entry : entries){
+      if ((new Path(entry)).isAbsolute()) 
+      {
+        e = entry;
+      } 
+      else 
+      { 
+        e = root + "/" + entry;
+      }
+      OutputStream out = fs.create(new Path(e));
+      try {
+        out.write((e).getBytes());
+        out.write("\n".getBytes());
+      } finally {
+        out.close();
+      }
+    }
+  }
+
+  private void mkdirs(String... entries) throws IOException {
+    for (String entry : entries){
+      fs.mkdirs(new Path(entry));
+    }
+  }
+
+  private void runTest(Path listFile, Path target, boolean sync) throws IOException {
+    DistCpOptions options = new DistCpOptions(listFile, target);
+    options.setSyncFolder(sync);
+    try {
+      new DistCp(getConf(), options).execute();
+    } catch (Exception e) {
+      LOG.error("Exception encountered ", e);
+      throw new IOException(e);
+    }
+  }
+
+  private void checkResult(Path target, int count, String... relPaths) throws IOException {
+    Assert.assertEquals(count, fs.listStatus(target).length);
+    if (relPaths == null || relPaths.length == 0) {
+      Assert.assertTrue(target.toString(), fs.exists(target));
+      return;
+    }
+    for (String relPath : relPaths) {
+      Assert.assertTrue(new Path(target, relPath).toString(), fs.exists(new Path(target, relPath)));
+    }
+  }
+
+}

+ 2 - 2
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java

@@ -127,9 +127,9 @@ public class TestCopyMapper {
       fs = cluster.getFileSystem();
       final Path qualifiedPath = new Path(path).makeQualified(fs.getUri(),
                                                       fs.getWorkingDirectory());
-      final long blockSize = fs.getDefaultBlockSize() * 2;
+      final long blockSize = fs.getDefaultBlockSize(qualifiedPath) * 2;
       outputStream = fs.create(qualifiedPath, true, 0,
-              (short)(fs.getDefaultReplication()*2),
+              (short)(fs.getDefaultReplication(qualifiedPath)*2),
               blockSize);
       outputStream.write(new byte[FILE_SIZE]);
       pathList.add(qualifiedPath);

+ 2 - 2
hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCp.java

@@ -374,9 +374,9 @@ public class DistCp implements Tool {
       FsPermission permission = preseved.contains(FileAttribute.PERMISSION)?
           srcstat.getPermission(): null;
       short replication = preseved.contains(FileAttribute.REPLICATION)?
-          srcstat.getReplication(): destFileSys.getDefaultReplication();
+          srcstat.getReplication(): destFileSys.getDefaultReplication(f);
       long blockSize = preseved.contains(FileAttribute.BLOCK_SIZE)?
-          srcstat.getBlockSize(): destFileSys.getDefaultBlockSize();
+          srcstat.getBlockSize(): destFileSys.getDefaultBlockSize(f);
       return destFileSys.create(f, permission, true, sizeBuf, replication,
           blockSize, reporter);
     }