
Merge trunk into branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3077@1387449 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon, 12 years ago
parent commit e9f4de5ced
100 changed files with 1789 additions and 475 deletions
  1. +41 -0  hadoop-common-project/hadoop-common/CHANGES.txt
  2. +12 -0  hadoop-common-project/hadoop-common/src/CMakeLists.txt
  3. +1 -1  hadoop-common-project/hadoop-common/src/contrib/bash-tab-completion/hadoop.sh
  4. +5 -3  hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
  5. +0 -3  hadoop-common-project/hadoop-common/src/main/bin/slaves.sh
  6. +11 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  7. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
  8. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
  9. +2 -2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java
  10. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3InputStream.java
  11. +88 -3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
  12. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  13. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  14. +9 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  15. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
  16. +5 -5  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocolPB.java
  17. +35 -23  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/impl/pb/client/GetUserMappingsProtocolPBClientImpl.java
  18. +20 -19  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/impl/pb/service/GetUserMappingsProtocolPBServiceImpl.java
  19. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java
  20. +3 -3  hadoop-common-project/hadoop-common/src/main/proto/GetUserMappingsProtocol.proto
  21. +70 -60  hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm
  22. +193 -0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
  23. +23 -1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java
  24. +181 -0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java
  25. +166 -0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TimedOutTestsListener.java
  26. +6 -0  hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
  27. +1 -1  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
  28. +0 -5  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm
  29. +22 -0  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java
  30. +33 -1  hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  31. +6 -0  hadoop-hdfs-project/hadoop-hdfs/pom.xml
  32. +4 -3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
  33. +6 -5  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
  34. +7 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  35. +37 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  36. +5 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
  37. +3 -3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
  38. +19 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
  39. +3 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
  40. +3 -4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  41. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
  42. +35 -5  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
  43. +5 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
  44. +2 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
  45. +2 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
  46. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  47. +5 -4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
  48. +8 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  49. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
  50. +1 -2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
  51. +22 -16  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  52. +0 -9  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
  53. +53 -36  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  54. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
  55. +24 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
  56. +1 -2  hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp
  57. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
  58. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp
  59. +27 -19  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
  60. +10 -5  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  61. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
  62. +3 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
  63. +2 -3  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
  64. +16 -7  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
  65. +3 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
  66. +164 -41  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
  67. +3 -3  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
  68. +3 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
  69. +25 -6  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
  70. +2 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
  71. +3 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
  72. +2 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
  73. +6 -0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
  74. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
  75. +16 -9  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
  76. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
  77. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
  78. +2 -6  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
  79. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
  80. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
  81. +2 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
  82. +6 -0  hadoop-mapreduce-project/CHANGES.txt
  83. +10 -4  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
  84. +33 -9  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
  85. +37 -33  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
  86. +28 -10  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
  87. +7 -7  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
  88. +40 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
  89. +54 -15  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
  90. +2 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java
  91. +5 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
  92. +4 -2  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
  93. +5 -3  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java
  94. +2 -2  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultipleLevelCaching.java
  95. +21 -12  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java
  96. +10 -6  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
  97. +12 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
  98. +12 -0  hadoop-mapreduce-project/pom.xml
  99. +3 -3  hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
  100. +3 -3  hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java

+ 41 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -202,6 +202,18 @@ Trunk (Unreleased)
     HADOOP-8684. Deadlock between WritableComparator and WritableComparable.
     (Jing Zhao via suresh)
 
+    HADOOP-8786. HttpServer continues to start even if AuthenticationFilter
+    fails to init (todd)
+
+    HADOOP-8767. Secondary namenode is started on slave nodes instead of
+    master nodes. (Giovanni Delussu via suresh)
+
+    HADOOP-8818. Use equals instead == in MD5MD5CRC32FileChecksum
+    and TFileDumper. (Brandon Li via suresh)
+
+    HADOOP-8821. Fix findbugs warning related to concatenating string in a 
+    for loop in Configuration#dumpDeprecatedKeys(). (suresh)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -212,12 +224,34 @@ Release 2.0.3-alpha - Unreleased
 
   NEW FEATURES
 
+    HADOOP-8597. Permit FsShell's text command to read Avro files.
+    (Ivan Vladimirov Ivanov via cutting)
+
   IMPROVEMENTS
 
+    HADOOP-8789. Tests setLevel(Level.OFF) should be Level.ERROR.
+    (Andy Isaacson via eli)
+
+    HADOOP-8755. Print thread dump when tests fail due to timeout. (Andrey
+    Klochkov via atm)
+
+    HADOOP-8806. libhadoop.so: dlopen should be better at locating
+    libsnappy.so, etc. (Colin Patrick McCabe via eli)
+
+    HADOOP-8812. ExitUtil#terminate should print Exception#toString. (eli)
+
+    HADOOP-8805. Move protocol buffer implementation of GetUserMappingProtocol from HDFS to Common. (bowang via tucu)
+
   OPTIMIZATIONS
 
   BUG FIXES
 
+    HADOOP-8795. BASH tab completion doesn't look in PATH, assumes path to
+    executable is specified. (Sean Mackrory via atm)
+
+    HADOOP-8780. Update DeprecatedProperties apt file. (Ahmed Radwan via
+    tomwhite)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES
@@ -343,6 +377,11 @@ Release 2.0.2-alpha - 2012-09-07
     HADOOP-8754. Deprecate all the RPC.getServer() variants.  (Brandon Li
     via szetszwo)
 
+    HADOOP-8801. ExitUtil#terminate should capture the exception stack trace. (eli)
+
+    HADOOP-8819. Incorrectly & is used instead of && in some file system 
+    implementations. (Brandon Li via suresh)
+
   BUG FIXES
 
     HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
@@ -497,6 +536,8 @@ Release 2.0.2-alpha - 2012-09-07
     HADOOP-8775. MR2 distcp permits non-positive value to -bandwidth option
     which causes job never to complete. (Sandy Ryza via atm)
 
+    HADOOP-8781. hadoop-config.sh should add JAVA_LIBRARY_PATH to LD_LIBRARY_PATH. (tucu)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HADOOP-8220. ZKFailoverController doesn't handle failure to become active

+ 12 - 0
hadoop-common-project/hadoop-common/src/CMakeLists.txt

@@ -109,6 +109,7 @@ add_executable(test_bulk_crc32
 )
 set_property(SOURCE main.cpp PROPERTY INCLUDE_DIRECTORIES "\"-Werror\" \"-Wall\"")
 
+SET(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
 add_dual_library(hadoop
     ${D}/io/compress/lz4/Lz4Compressor.c
     ${D}/io/compress/lz4/Lz4Decompressor.c
@@ -125,6 +126,17 @@ add_dual_library(hadoop
     ${D}/util/NativeCrc32.c
     ${D}/util/bulk_crc32.c
 )
+
+IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+    #
+    # By embedding '$ORIGIN' into the RPATH of libhadoop.so,
+    # dlopen will look in the directory containing libhadoop.so.
+    # However, $ORIGIN is not supported by all operating systems.
+    #
+    SET_TARGET_PROPERTIES(hadoop 
+        PROPERTIES INSTALL_RPATH "\$ORIGIN/")
+ENDIF()
+
 target_link_dual_libraries(hadoop
     dl
     ${JAVA_JVM_LIBRARY}
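Why this matters on the Java side: System.loadLibrary resolves libhadoop.so through java.library.path, but libraries that libhadoop.so itself dlopens later (libsnappy.so and friends, per HADOOP-8806) are resolved by the dynamic linker, which consults the RPATH, not java.library.path. A minimal sketch of that loading sequence, not part of this commit, and the library layout is an assumption:

    public class NativeLoadSketch {
      public static void main(String[] args) {
        // Resolved via -Djava.library.path (derived from JAVA_LIBRARY_PATH
        // in hadoop-config.sh), e.g. $HADOOP_HOME/lib/native/libhadoop.so.
        System.loadLibrary("hadoop");

        // libsnappy.so is not loaded here: libhadoop.so dlopens it lazily.
        // dlopen ignores java.library.path and searches DT_RPATH/DT_RUNPATH
        // and LD_LIBRARY_PATH instead. With '$ORIGIN/' embedded in the RPATH,
        // the linker also searches the directory containing libhadoop.so, so
        // a libsnappy.so placed next to it is found without extra env setup.
        System.out.println("native hadoop library loaded");
      }
    }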

+ 1 - 1
hadoop-common-project/hadoop-common/src/contrib/bash-tab-completion/hadoop.sh

@@ -26,7 +26,7 @@ _hadoop() {
   COMPREPLY=()
   cur=${COMP_WORDS[COMP_CWORD]}
   prev=${COMP_WORDS[COMP_CWORD-1]}  
-  script=${COMP_WORDS[0]}  
+  script=`which ${COMP_WORDS[0]}`
   
   # Bash lets you tab complete things even if the script doesn't
   # exist (or isn't executable). Check to make sure it is, as we

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

@@ -74,6 +74,10 @@ fi
 
 export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_PREFIX/$DEFAULT_CONF_DIR}"
 
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
 # User can specify hostnames or a file where the hostnames are (not both)
 if [[ ( "$HADOOP_SLAVES" != '' ) && ( "$HADOOP_SLAVE_NAMES" != '' ) ]] ; then
   echo \
@@ -113,9 +117,6 @@ case "`uname`" in
 CYGWIN*) cygwin=true;;
 esac
 
-if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
-  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-fi
 
 # check if net.ipv6.bindv6only is set to 1
 bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
@@ -243,6 +244,7 @@ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
 HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
 if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
   HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+  export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$JAVA_LIBRARY_PATH
 fi  
 HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.policy.file=$HADOOP_POLICYFILE"
 

+ 0 - 3
hadoop-common-project/hadoop-common/src/main/bin/slaves.sh

@@ -42,9 +42,6 @@ DEFAULT_LIBEXEC_DIR="$bin"/../libexec
 HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
 . $HADOOP_LIBEXEC_DIR/hadoop-config.sh
 
-if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
-  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-fi
 
 # Where to start the script, see hadoop-config.sh
 # (it set up the variables based on command line options)

+ 11 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -2332,7 +2332,17 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   
   /**
    * A unique class which is used as a sentinel value in the caching
-   * for getClassByName. {@see Configuration#getClassByNameOrNull(String)}
+   * for getClassByName. {@link Configuration#getClassByNameOrNull(String)}
    */
   private static abstract class NegativeCacheSentinel {}
+
+  public static void dumpDeprecatedKeys() {
+    for (Map.Entry<String, DeprecatedKeyInfo> entry : deprecatedKeyMap.entrySet()) {
+      StringBuilder newKeys = new StringBuilder();
+      for (String newKey : entry.getValue().newKeys) {
+        newKeys.append(newKey).append("\t");
+      }
+      System.out.println(entry.getKey() + "\t" + newKeys.toString());
+    }
+  }
 }
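The findbugs warning fixed here (HADOOP-8821) is the classic quadratic string concatenation in a loop: each += copies the entire accumulated string, while StringBuilder appends in amortized constant time. A standalone illustration of the two patterns, not Hadoop code; the key names are just sample data:

    public class ConcatSketch {
      public static void main(String[] args) {
        String[] newKeys = {"dfs.namenode.name.dir", "dfs.namenode.edits.dir"};

        // Flagged pattern: building an n-part string costs O(n^2) copying,
        // because each += allocates and copies the whole string so far.
        String joined = "";
        for (String k : newKeys) {
          joined += k + "\t";
        }

        // The pattern used in dumpDeprecatedKeys above: O(total length).
        StringBuilder sb = new StringBuilder();
        for (String k : newKeys) {
          sb.append(k).append('\t');
        }
        System.out.println(joined.equals(sb.toString())); // true
      }
    }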

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java

@@ -133,7 +133,7 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
 
     try {
       // old versions don't support crcType.
-      if (crcType == null || crcType == "") {
+      if (crcType == null || crcType.equals("")) {
         finalCrcType = DataChecksum.Type.CRC32;
       } else {
         finalCrcType = DataChecksum.Type.valueOf(crcType);
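The class of bug fixed here (HADOOP-8818): == on Strings compares object identity, so it only happens to work for interned literals, while a value read or constructed at runtime is usually a distinct object. A standalone demonstration:

    public class StringEqualsSketch {
      public static void main(String[] args) {
        // A String built at runtime is a different object from the literal "".
        String parsed = new String(new char[0]);

        System.out.println(parsed == "");      // false: different objects
        System.out.println(parsed.equals("")); // true: same contents
        System.out.println(parsed.isEmpty());  // equivalent, and clearer
      }
    }

The same fix pattern appears in TFileDumper further down, where "none" was compared with != instead of equals().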

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java

@@ -488,7 +488,7 @@ public class FTPFileSystem extends FileSystem {
       if (created) {
         String parentDir = parent.toUri().getPath();
         client.changeWorkingDirectory(parentDir);
-        created = created & client.makeDirectory(pathName);
+        created = created && client.makeDirectory(pathName);
       }
     } else if (isFile(client, absolute)) {
       throw new IOException(String.format(
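This hunk, and the FTPInputStream, S3InputStream, ViewFileSystem, and ViewFs hunks that follow (HADOOP-8819), replace the non-short-circuiting & with &&. For boolean operands the two produce the same value, but & always evaluates both sides, so here client.makeDirectory() would still be invoked even when created is already false. A standalone sketch of the difference:

    public class ShortCircuitSketch {
      static int calls = 0;

      static boolean sideEffect() {
        calls++;                 // stands in for client.makeDirectory(...)
        return true;
      }

      public static void main(String[] args) {
        boolean created = false;

        boolean a = created & sideEffect();  // & evaluates both operands
        System.out.println(a + ", calls=" + calls);  // false, calls=1

        boolean b = created && sideEffect(); // && short-circuits on false
        System.out.println(b + ", calls=" + calls);  // false, calls=1 still
      }
    }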

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java

@@ -77,7 +77,7 @@ public class FTPInputStream extends FSInputStream {
     if (byteRead >= 0) {
       pos++;
     }
-    if (stats != null & byteRead >= 0) {
+    if (stats != null && byteRead >= 0) {
       stats.incrementBytesRead(1);
     }
     return byteRead;
@@ -93,7 +93,7 @@ public class FTPInputStream extends FSInputStream {
     if (result > 0) {
       pos += result;
     }
-    if (stats != null & result > 0) {
+    if (stats != null && result > 0) {
       stats.incrementBytesRead(result);
     }
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3InputStream.java

@@ -113,7 +113,7 @@ class S3InputStream extends FSInputStream {
         pos++;
       }
     }
-    if (stats != null & result >= 0) {
+    if (stats != null && result >= 0) {
       stats.incrementBytesRead(1);
     }
     return result;

+ 88 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java

@@ -17,11 +17,21 @@
  */
 package org.apache.hadoop.fs.shell;
 
-import java.io.IOException;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
 import java.io.InputStream;
+import java.io.IOException;
 import java.util.LinkedList;
 import java.util.zip.GZIPInputStream;
 
+import org.apache.avro.file.DataFileReader;
+import org.apache.avro.file.FileReader;
+import org.apache.avro.generic.GenericDatumReader;
+import org.apache.avro.generic.GenericDatumWriter;
+import org.apache.avro.io.DatumWriter;
+import org.apache.avro.io.EncoderFactory;
+import org.apache.avro.io.JsonEncoder;
+import org.apache.avro.Schema;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -37,6 +47,10 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.codehaus.jackson.JsonEncoding;
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.util.MinimalPrettyPrinter;
 
 /**
  * Display contents of files 
@@ -95,14 +109,14 @@ class Display extends FsCommand {
   
   /**
    * Same behavior as "-cat", but handles zip and TextRecordInputStream
-   * encodings. 
+   * and Avro encodings. 
    */ 
   public static class Text extends Cat {
     public static final String NAME = "text";
     public static final String USAGE = Cat.USAGE;
     public static final String DESCRIPTION =
       "Takes a source file and outputs the file in text format.\n" +
-      "The allowed formats are zip and TextRecordInputStream.";
+      "The allowed formats are zip and TextRecordInputStream and Avro.";
     
     @Override
     protected InputStream getInputStream(PathData item) throws IOException {
@@ -132,6 +146,13 @@ class Display extends FsCommand {
           }
           break;
         }
+        case 0x4f62: { // 'O' 'b'
+          if (i.readByte() == 'j') {
+            i.close();
+            return new AvroFileInputStream(item.stat);
+          }
+          break;
+        }
       }
 
       // File is non-compressed, or not a file container we know.
@@ -187,4 +208,68 @@ class Display extends FsCommand {
       super.close();
     }
   }
+
+  /**
+   * This class transforms a binary Avro data file into an InputStream
+   * with data that is in a human readable JSON format.
+   */
+  protected static class AvroFileInputStream extends InputStream {
+    private int pos;
+    private byte[] buffer;
+    private ByteArrayOutputStream output;
+    private FileReader fileReader;
+    private DatumWriter<Object> writer;
+    private JsonEncoder encoder;
+
+    public AvroFileInputStream(FileStatus status) throws IOException {
+      pos = 0;
+      buffer = new byte[0];
+      GenericDatumReader<Object> reader = new GenericDatumReader<Object>();
+      fileReader =
+        DataFileReader.openReader(new File(status.getPath().toUri()), reader);
+      Schema schema = fileReader.getSchema();
+      writer = new GenericDatumWriter<Object>(schema);
+      output = new ByteArrayOutputStream();
+      JsonGenerator generator =
+        new JsonFactory().createJsonGenerator(output, JsonEncoding.UTF8);
+      MinimalPrettyPrinter prettyPrinter = new MinimalPrettyPrinter();
+      prettyPrinter.setRootValueSeparator(System.getProperty("line.separator"));
+      generator.setPrettyPrinter(prettyPrinter);
+      encoder = EncoderFactory.get().jsonEncoder(schema, generator);
+    }
+
+    /**
+     * Read a single byte from the stream.
+     */
+    @Override
+    public int read() throws IOException {
+      if (pos < buffer.length) {
+        return buffer[pos++];
+      }
+      if (!fileReader.hasNext()) {
+        return -1;
+      }
+      writer.write(fileReader.next(), encoder);
+      encoder.flush();
+      if (!fileReader.hasNext()) {
+        // Write a new line after the last Avro record.
+        output.write(System.getProperty("line.separator").getBytes());
+        output.flush();
+      }
+      pos = 0;
+      buffer = output.toByteArray();
+      output.reset();
+      return read();
+    }
+
+    /**
+      * Close the stream.
+      */
+    @Override
+    public void close() throws IOException {
+      fileReader.close();
+      output.close();
+      super.close();
+    }
+  }
 }
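The new case label keys off the Avro object container magic: every Avro data file starts with the four bytes 'O' 'b' 'j' 0x01. readShort() is big-endian, so the first two bytes arrive as 0x4f62, and the code then confirms the third byte is 'j' before handing off to AvroFileInputStream. A standalone sketch of the same detection, not part of this commit:

    import java.io.DataInputStream;
    import java.io.FileInputStream;
    import java.io.IOException;

    public class AvroMagicSketch {
      // Returns true if the file begins with the "Obj" prefix of the Avro
      // container magic. The trailing version byte 0x01 is not checked,
      // matching the three-byte check in Display.Text above.
      public static boolean looksLikeAvro(String path) throws IOException {
        try (DataInputStream in =
            new DataInputStream(new FileInputStream(path))) {
          return in.readShort() == 0x4f62   // 'O' << 8 | 'b', big-endian
              && in.readByte() == 'j';
        }
      }

      public static void main(String[] args) throws IOException {
        System.out.println(looksLikeAvro(args[0]));
      }
    }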

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -707,7 +707,7 @@ public class ViewFileSystem extends FileSystem {
     @Override
     public boolean mkdirs(Path dir, FsPermission permission)
         throws AccessControlException, FileAlreadyExistsException {
-      if (theInternalDir.isRoot & dir == null) {
+      if (theInternalDir.isRoot && dir == null) {
         throw new FileAlreadyExistsException("/ already exits");
       }
       // Note dir starts with /

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -750,7 +750,7 @@ public class ViewFs extends AbstractFileSystem {
     public void mkdir(final Path dir, final FsPermission permission,
         final boolean createParent) throws AccessControlException,
         FileAlreadyExistsException {
-      if (theInternalDir.isRoot & dir == null) {
+      if (theInternalDir.isRoot && dir == null) {
         throw new FileAlreadyExistsException("/ already exits");
       }
       throw readOnlyMountTable("mkdir", dir);

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -677,6 +677,15 @@ public class HttpServer implements FilterContainer {
               "Problem in starting http server. Server handlers failed");
         }
       }
+      // Make sure there are no errors initializing the context.
+      Throwable unavailableException = webAppContext.getUnavailableException();
+      if (unavailableException != null) {
+        // Have to stop the webserver, or else its non-daemon threads
+        // will hang forever.
+        webServer.stop();
+        throw new IOException("Unable to initialize WebAppContext",
+            unavailableException);
+      }
     } catch (IOException e) {
       throw e;
     } catch (InterruptedException e) {

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java

@@ -125,7 +125,7 @@ class TFileDumper {
           dataSizeUncompressed += region.getRawSize();
         }
         properties.put("Data Block Bytes", Long.toString(dataSize));
-        if (reader.readerBCF.getDefaultCompressionName() != "none") {
+        if (!reader.readerBCF.getDefaultCompressionName().equals("none")) {
           properties.put("Data Block Uncompressed Bytes", Long
               .toString(dataSizeUncompressed));
           properties.put("Data Block Compression Ratio", String.format(

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocolPB.java

@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- *     http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,21 +16,21 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.tools;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocol.GetUserMappingsProtocolService;
 
 @KerberosInfo(
     serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
 @ProtocolInfo(
-    protocolName = "org.apache.hadoop.tools.GetUserMappingsProtocol", 
+    protocolName = "org.apache.hadoop.tools.GetUserMappingsProtocol",
     protocolVersion = 1)
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "YARN"})
 @InterfaceStability.Evolving
 public interface GetUserMappingsProtocolPB extends
   GetUserMappingsProtocolService.BlockingInterface {

+ 35 - 23
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/impl/pb/client/GetUserMappingsProtocolPBClientImpl.java

@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- *     http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,54 +16,66 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.tools.impl.pb.client;
 
 import java.io.Closeable;
 import java.io.IOException;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.tools.GetUserMappingsProtocolPB;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocol.GetGroupsForUserRequestProto;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocol.GetGroupsForUserResponseProto;
 
-import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 
-public class GetUserMappingsProtocolClientSideTranslatorPB implements
+public class GetUserMappingsProtocolPBClientImpl implements
     ProtocolMetaInterface, GetUserMappingsProtocol, Closeable {
 
-  /** RpcController is not used and hence is set to null */
-  private final static RpcController NULL_CONTROLLER = null;
-  private final GetUserMappingsProtocolPB rpcProxy;
+  private GetUserMappingsProtocolPB proxy;
   
-  public GetUserMappingsProtocolClientSideTranslatorPB(
-      GetUserMappingsProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
+  public GetUserMappingsProtocolPBClientImpl(
+      long clientVersion, InetSocketAddress addr, Configuration conf)
+      throws IOException {
+    RPC.setProtocolEngine(conf, GetUserMappingsProtocolPB.class,
+        ProtobufRpcEngine.class);
+    proxy = (GetUserMappingsProtocolPB) RPC.getProxy(
+        GetUserMappingsProtocolPB.class, clientVersion, addr, conf);
   }
-
+  
+  public GetUserMappingsProtocolPBClientImpl(
+      GetUserMappingsProtocolPB proxy) {
+    this.proxy = proxy;
+  }
+  
   @Override
   public void close() throws IOException {
-    RPC.stopProxy(rpcProxy);
+    RPC.stopProxy(proxy);
   }
-
+  
   @Override
   public String[] getGroupsForUser(String user) throws IOException {
-    GetGroupsForUserRequestProto request = GetGroupsForUserRequestProto
-        .newBuilder().setUser(user).build();
-    GetGroupsForUserResponseProto resp;
+    GetGroupsForUserRequestProto requestProto = 
+        GetGroupsForUserRequestProto.newBuilder().setUser(user).build();
     try {
-      resp = rpcProxy.getGroupsForUser(NULL_CONTROLLER, request);
-    } catch (ServiceException se) {
-      throw ProtobufHelper.getRemoteException(se);
+      GetGroupsForUserResponseProto responseProto =
+          proxy.getGroupsForUser(null, requestProto);
+      return (String[]) responseProto.getGroupsList().toArray(
+          new String[responseProto.getGroupsCount()]);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
     }
-    return resp.getGroupsList().toArray(new String[resp.getGroupsCount()]);
   }
 
   @Override
   public boolean isMethodSupported(String methodName) throws IOException {
-    return RpcClientUtil.isMethodSupported(rpcProxy,
+    return RpcClientUtil.isMethodSupported(proxy,
         GetUserMappingsProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
         RPC.getProtocolVersion(GetUserMappingsProtocolPB.class), methodName);
   }

+ 20 - 19
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolServerSideTranslatorPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/impl/pb/service/GetUserMappingsProtocolPBServiceImpl.java

@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- *     http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,42 +16,43 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.tools.impl.pb.service;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.tools.GetUserMappingsProtocolPB;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocol.GetGroupsForUserRequestProto;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocol.GetGroupsForUserResponseProto;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 
-public class GetUserMappingsProtocolServerSideTranslatorPB implements
+public class GetUserMappingsProtocolPBServiceImpl implements
     GetUserMappingsProtocolPB {
 
-  private final GetUserMappingsProtocol impl;
-
-  public GetUserMappingsProtocolServerSideTranslatorPB(
-      GetUserMappingsProtocol impl) {
-    this.impl = impl;
+  private GetUserMappingsProtocol real;
+  
+  public GetUserMappingsProtocolPBServiceImpl(GetUserMappingsProtocol impl) {
+    this.real = impl;
   }
-
+  
   @Override
   public GetGroupsForUserResponseProto getGroupsForUser(
       RpcController controller, GetGroupsForUserRequestProto request)
       throws ServiceException {
-    String[] groups;
+    String user = request.getUser();
     try {
-      groups = impl.getGroupsForUser(request.getUser());
+      String[] groups = real.getGroupsForUser(user);
+      GetGroupsForUserResponseProto.Builder responseBuilder =
+          GetGroupsForUserResponseProto.newBuilder();
+      for (String group : groups) {
+        responseBuilder.addGroups(group);
+      }
+      return responseBuilder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    GetGroupsForUserResponseProto.Builder builder = GetGroupsForUserResponseProto
-        .newBuilder();
-    for (String g : groups) {
-      builder.addGroups(g);
-    }
-    return builder.build();
   }
+
 }

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java

@@ -101,7 +101,7 @@ public final class ExitUtil {
    * @throws ExitException if System.exit is disabled for test purposes
    */
   public static void terminate(int status, Throwable t) throws ExitException {
-    terminate(status, t.getMessage());
+    terminate(status, StringUtils.stringifyException(t));
   }
 
   /**
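The practical difference (HADOOP-8812): Throwable#getMessage() drops the stack trace entirely and is often null, so fatal exits were logging almost nothing. StringUtils.stringifyException renders the full trace to a String. A dependency-free sketch of what it does:

    import java.io.PrintWriter;
    import java.io.StringWriter;

    public class StringifySketch {
      // Mirrors StringUtils.stringifyException without the Hadoop dependency.
      static String stringify(Throwable t) {
        StringWriter sw = new StringWriter();
        t.printStackTrace(new PrintWriter(sw, true));
        return sw.toString();
      }

      public static void main(String[] args) {
        Throwable t = new IllegalStateException(); // built with no message
        System.out.println("getMessage(): " + t.getMessage()); // prints null
        System.out.println(stringify(t)); // class name plus full stack trace
      }
    }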

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/GetUserMappingsProtocol.proto → hadoop-common-project/hadoop-common/src/main/proto/GetUserMappingsProtocol.proto

@@ -15,9 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
-option java_outer_classname = "GetUserMappingsProtocolProtos";
+ 
+option java_package = "org.apache.hadoop.tools.proto";
+option java_outer_classname = "GetUserMappingsProtocol";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 

+ 70 - 60
hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm

@@ -24,8 +24,6 @@ Deprecated Properties
 *-------------------------------+-----------------------+
 || <<Deprecated property name>> || <<New property name>>|
 *-------------------------------+-----------------------+
-|StorageId | dfs.datanode.StorageId
-*---+---+
 |create.empty.dir.if.nonexist | mapreduce.jobcontrol.createdir.ifnotexist
 *---+---+
 |dfs.access.time.precision | dfs.namenode.accesstime.precision
@@ -38,14 +36,16 @@ Deprecated Properties
 *---+---+
 |dfs.block.size | dfs.blocksize
 *---+---+
-|dfs.client.buffer.dir | fs.client.buffer.dir
-*---+---+
 |dfs.data.dir | dfs.datanode.data.dir
 *---+---+
 |dfs.datanode.max.xcievers | dfs.datanode.max.transfer.threads
 *---+---+
 |dfs.df.interval | fs.df.interval
 *---+---+
+|dfs.federation.nameservice.id | dfs.nameservice.id
+*---+---+
+|dfs.federation.nameservices | dfs.nameservices
+*---+---+
 |dfs.http.address | dfs.namenode.http-address
 *---+---+
 |dfs.https.address | dfs.namenode.https-address
@@ -54,10 +54,10 @@ Deprecated Properties
 *---+---+
 |dfs.https.need.client.auth | dfs.client.https.need-auth
 *---+---+
-|dfs.max-repl-streams | dfs.namenode.replication.max-streams
-*---+---+
 |dfs.max.objects | dfs.namenode.max.objects
 *---+---+
+|dfs.max-repl-streams | dfs.namenode.replication.max-streams
+*---+---+
 |dfs.name.dir | dfs.namenode.name.dir
 *---+---+
 |dfs.name.dir.restore | dfs.namenode.name.dir.restore
@@ -86,6 +86,8 @@ Deprecated Properties
 *---+---+
 |dfs.socket.timeout | dfs.client.socket-timeout
 *---+---+
+|dfs.umaskmode | fs.permissions.umask-mode
+*---+---+
 |dfs.write.packet.size | dfs.client-write-packet-size
 *---+---+
 |fs.checkpoint.dir | dfs.namenode.checkpoint.dir
@@ -106,10 +108,10 @@ Deprecated Properties
 *---+---+
 |hadoop.pipes.command-file.keep | mapreduce.pipes.commandfile.preserve
 *---+---+
-|hadoop.pipes.executable | mapreduce.pipes.executable
-*---+---+
 |hadoop.pipes.executable.interpretor | mapreduce.pipes.executable.interpretor
 *---+---+
+|hadoop.pipes.executable | mapreduce.pipes.executable
+*---+---+
 |hadoop.pipes.java.mapper | mapreduce.pipes.isjavamapper
 *---+---+
 |hadoop.pipes.java.recordreader | mapreduce.pipes.isjavarecordreader
@@ -130,6 +132,12 @@ Deprecated Properties
 *---+---+
 |io.sort.spill.percent | mapreduce.map.sort.spill.percent
 *---+---+
+|jobclient.completion.poll.interval | mapreduce.client.completion.pollinterval
+*---+---+
+|jobclient.output.filter | mapreduce.client.output.filter
+*---+---+
+|jobclient.progress.monitor.poll.interval | mapreduce.client.progressmonitor.pollinterval
+*---+---+
 |job.end.notification.url | mapreduce.job.end-notification.url
 *---+---+
 |job.end.retry.attempts | mapreduce.job.end-notification.retry.attempts
@@ -138,12 +146,6 @@ Deprecated Properties
 *---+---+
 |job.local.dir | mapreduce.job.local.dir
 *---+---+
-|jobclient.completion.poll.interval | mapreduce.client.completion.pollinterval
-*---+---+
-|jobclient.output.filter | mapreduce.client.output.filter
-*---+---+
-|jobclient.progress.monitor.poll.interval | mapreduce.client.progressmonitor.pollinterval
-*---+---+
 |keep.failed.task.files | mapreduce.task.files.preserve.failedtasks
 *---+---+
 |keep.task.files.pattern | mapreduce.task.files.preserve.filepattern
@@ -196,10 +198,6 @@ Deprecated Properties
 *---+---+
 |mapred.compress.map.output | mapreduce.map.output.compress
 *---+---+
-|mapred.create.symlink | NONE - symlinking is always on
-*---+---+
-|mapreduce.job.cache.symlink.create | NONE - symlinking is always on
-*---+---+
 |mapred.data.field.separator | mapreduce.fieldsel.data.field.separator
 *---+---+
 |mapred.debug.out.lines | mapreduce.task.debugout.lines
@@ -214,18 +212,18 @@ Deprecated Properties
 *---+---+
 |mapred.heartbeats.in.second | mapreduce.jobtracker.heartbeats.in.second
 *---+---+
-|mapred.hosts | mapreduce.jobtracker.hosts.filename
-*---+---+
 |mapred.hosts.exclude | mapreduce.jobtracker.hosts.exclude.filename
 *---+---+
-|mapred.inmem.merge.threshold | mapreduce.reduce.merge.inmem.threshold
+|mapred.hosts | mapreduce.jobtracker.hosts.filename
 *---+---+
-|mapred.input.dir | mapreduce.input.fileinputformat.inputdir
+|mapred.inmem.merge.threshold | mapreduce.reduce.merge.inmem.threshold
 *---+---+
 |mapred.input.dir.formats | mapreduce.input.multipleinputs.dir.formats
 *---+---+
 |mapred.input.dir.mappers | mapreduce.input.multipleinputs.dir.mappers
 *---+---+
+|mapred.input.dir | mapreduce.input.fileinputformat.inputdir
+*---+---+
 |mapred.input.pathFilter.class | mapreduce.input.pathFilter.class
 *---+---+
 |mapred.jar | mapreduce.job.jar
@@ -236,6 +234,8 @@ Deprecated Properties
 *---+---+
 |mapred.job.id | mapreduce.job.id
 *---+---+
+|mapred.jobinit.threads | mapreduce.jobtracker.jobinit.threads
+*---+---+
 |mapred.job.map.memory.mb | mapreduce.map.memory.mb
 *---+---+
 |mapred.job.name | mapreduce.job.name
@@ -258,42 +258,40 @@ Deprecated Properties
 *---+---+
 |mapred.job.shuffle.merge.percent | mapreduce.reduce.shuffle.merge.percent
 *---+---+
-|mapred.job.tracker | mapreduce.jobtracker.address
-*---+---+
 |mapred.job.tracker.handler.count | mapreduce.jobtracker.handler.count
 *---+---+
 |mapred.job.tracker.history.completed.location | mapreduce.jobtracker.jobhistory.completed.location
 *---+---+
 |mapred.job.tracker.http.address | mapreduce.jobtracker.http.address
 *---+---+
+|mapred.jobtracker.instrumentation | mapreduce.jobtracker.instrumentation
+*---+---+
+|mapred.jobtracker.job.history.block.size | mapreduce.jobtracker.jobhistory.block.size
+*---+---+
 |mapred.job.tracker.jobhistory.lru.cache.size | mapreduce.jobtracker.jobhistory.lru.cache.size
 *---+---+
+|mapred.job.tracker | mapreduce.jobtracker.address
+*---+---+
+|mapred.jobtracker.maxtasks.per.job | mapreduce.jobtracker.maxtasks.perjob
+*---+---+
 |mapred.job.tracker.persist.jobstatus.active | mapreduce.jobtracker.persist.jobstatus.active
 *---+---+
 |mapred.job.tracker.persist.jobstatus.dir | mapreduce.jobtracker.persist.jobstatus.dir
 *---+---+
 |mapred.job.tracker.persist.jobstatus.hours | mapreduce.jobtracker.persist.jobstatus.hours
 *---+---+
-|mapred.job.tracker.retire.jobs | mapreduce.jobtracker.retirejobs
+|mapred.jobtracker.restart.recover | mapreduce.jobtracker.restart.recover
 *---+---+
 |mapred.job.tracker.retiredjobs.cache.size | mapreduce.jobtracker.retiredjobs.cache.size
 *---+---+
-|mapred.jobinit.threads | mapreduce.jobtracker.jobinit.threads
-*---+---+
-|mapred.jobtracker.instrumentation | mapreduce.jobtracker.instrumentation
-*---+---+
-|mapred.jobtracker.job.history.block.size | mapreduce.jobtracker.jobhistory.block.size
-*---+---+
-|mapred.jobtracker.maxtasks.per.job | mapreduce.jobtracker.maxtasks.perjob
+|mapred.job.tracker.retire.jobs | mapreduce.jobtracker.retirejobs
 *---+---+
-|mapred.jobtracker.restart.recover | mapreduce.jobtracker.restart.recover
+|mapred.jobtracker.taskalloc.capacitypad | mapreduce.jobtracker.taskscheduler.taskalloc.capacitypad
 *---+---+
 |mapred.jobtracker.taskScheduler | mapreduce.jobtracker.taskscheduler
 *---+---+
 |mapred.jobtracker.taskScheduler.maxRunningTasksPerJob | mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob
 *---+---+
-|mapred.jobtracker.taskalloc.capacitypad | mapreduce.jobtracker.taskscheduler.taskalloc.capacitypad
-*---+---+
 |mapred.join.expr | mapreduce.join.expr
 *---+---+
 |mapred.join.keycomparator | mapreduce.join.keycomparator
@@ -320,19 +318,19 @@ Deprecated Properties
 *---+---+
 |mapred.map.output.compression.codec | mapreduce.map.output.compress.codec
 *---+---+
-|mapred.map.task.debug.script | mapreduce.map.debug.script
-*---+---+
-|mapred.map.tasks | mapreduce.job.maps
-*---+---+
-|mapred.map.tasks.speculative.execution | mapreduce.map.speculative
-*---+---+
 |mapred.mapoutput.key.class | mapreduce.map.output.key.class
 *---+---+
 |mapred.mapoutput.value.class | mapreduce.map.output.value.class
 *---+---+
+|mapred.mapper.regex.group | mapreduce.mapper.regexmapper..group
+*---+---+
 |mapred.mapper.regex | mapreduce.mapper.regex
 *---+---+
-|mapred.mapper.regex.group | mapreduce.mapper.regexmapper..group
+|mapred.map.task.debug.script | mapreduce.map.debug.script
+*---+---+
+|mapred.map.tasks | mapreduce.job.maps
+*---+---+
+|mapred.map.tasks.speculative.execution | mapreduce.map.speculative
 *---+---+
 |mapred.max.map.failures.percent | mapreduce.map.failures.maxpercent
 *---+---+
@@ -352,12 +350,12 @@ Deprecated Properties
 *---+---+
 |mapred.min.split.size.per.rack | mapreduce.input.fileinputformat.split.minsize.per.rack
 *---+---+
-|mapred.output.compress | mapreduce.output.fileoutputformat.compress
-*---+---+
 |mapred.output.compression.codec | mapreduce.output.fileoutputformat.compress.codec
 *---+---+
 |mapred.output.compression.type | mapreduce.output.fileoutputformat.compress.type
 *---+---+
+|mapred.output.compress | mapreduce.output.fileoutputformat.compress
+*---+---+
 |mapred.output.dir | mapreduce.output.fileoutputformat.outputdir
 *---+---+
 |mapred.output.key.class | mapreduce.job.output.key.class
@@ -440,12 +438,6 @@ Deprecated Properties
 *---+---+
 |mapred.task.timeout | mapreduce.task.timeout
 *---+---+
-|mapred.task.tracker.http.address | mapreduce.tasktracker.http.address
-*---+---+
-|mapred.task.tracker.report.address | mapreduce.tasktracker.report.address
-*---+---+
-|mapred.task.tracker.task-controller | mapreduce.tasktracker.taskcontroller
-*---+---+
 |mapred.tasktracker.dns.interface | mapreduce.tasktracker.dns.interface
 *---+---+
 |mapred.tasktracker.dns.nameserver | mapreduce.tasktracker.dns.nameserver
@@ -454,6 +446,8 @@ Deprecated Properties
 *---+---+
 |mapred.tasktracker.expiry.interval | mapreduce.jobtracker.expire.trackers.interval
 *---+---+
+|mapred.task.tracker.http.address | mapreduce.tasktracker.http.address
+*---+---+
 |mapred.tasktracker.indexcache.mb | mapreduce.tasktracker.indexcache.mb
 *---+---+
 |mapred.tasktracker.instrumentation | mapreduce.tasktracker.instrumentation
@@ -466,6 +460,10 @@ Deprecated Properties
 *---+---+
 |mapred.tasktracker.reduce.tasks.maximum | mapreduce.tasktracker.reduce.tasks.maximum
 *---+---+
+|mapred.task.tracker.report.address | mapreduce.tasktracker.report.address
+*---+---+
+|mapred.task.tracker.task-controller | mapreduce.tasktracker.taskcontroller
+*---+---+
 |mapred.tasktracker.taskmemorymanager.monitoring-interval | mapreduce.tasktracker.taskmemorymanager.monitoringinterval
 *---+---+
 |mapred.tasktracker.tasks.sleeptime-before-sigkill | mapreduce.tasktracker.tasks.sleeptimebeforesigkill
@@ -480,20 +478,12 @@ Deprecated Properties
 *---+---+
 |mapred.tip.id | mapreduce.task.id
 *---+---+
-|mapred.used.genericoptionsparser | mapreduce.client.genericoptionsparser.used
-*---+---+
-|mapred.userlog.limit.kb | mapreduce.task.userlog.limit.kb
-*---+---+
-|mapred.userlog.retain.hours | mapreduce.job.userlog.retain.hours
-*---+---+
-|mapred.work.output.dir | mapreduce.task.output.dir
-*---+---+
-|mapred.working.dir | mapreduce.job.working.dir
-*---+---+
 |mapreduce.combine.class | mapreduce.job.combine.class
 *---+---+
 |mapreduce.inputformat.class | mapreduce.job.inputformat.class
 *---+---+
+|mapreduce.job.counters.limit | mapreduce.job.counters.max
+*---+---+
 |mapreduce.jobtracker.permissions.supergroup | mapreduce.cluster.permissions.supergroup
 *---+---+
 |mapreduce.map.class | mapreduce.job.map.class
@@ -504,6 +494,16 @@ Deprecated Properties
 *---+---+
 |mapreduce.reduce.class | mapreduce.job.reduce.class
 *---+---+
+|mapred.used.genericoptionsparser | mapreduce.client.genericoptionsparser.used
+*---+---+
+|mapred.userlog.limit.kb | mapreduce.task.userlog.limit.kb
+*---+---+
+|mapred.userlog.retain.hours | mapreduce.job.userlog.retain.hours
+*---+---+
+|mapred.working.dir | mapreduce.job.working.dir
+*---+---+
+|mapred.work.output.dir | mapreduce.task.output.dir
+*---+---+
 |min.num.spills.for.combine | mapreduce.map.combine.minspills
 *---+---+
 |reduce.output.key.value.fields.spec | mapreduce.fieldsel.reduce.output.key.value.fields.spec
@@ -537,4 +537,14 @@ Deprecated Properties
 |user.name | mapreduce.job.user.name
 *---+---+
 |webinterface.private.actions | mapreduce.jobtracker.webinterface.trusted
+*---+---+
+
+  The following table lists additional changes to some configuration properties:
+
+*-------------------------------+-----------------------+
+|| <<Deprecated property name>> || <<New property name>>|
+*-------------------------------+-----------------------+
+|mapred.create.symlink | NONE - symlinking is always on
+*---+---+
+|mapreduce.job.cache.symlink.create | NONE - symlinking is always on
 *---+---+

+ 193 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java

@@ -0,0 +1,193 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.shell;
+
+import static org.junit.Assert.*;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.InputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.lang.reflect.Method;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+/**
+ * This class tests the logic for displaying the binary formats supported
+ * by the Text command.
+ */
+public class TestTextCommand {
+  private static final String TEST_ROOT_DIR =
+    System.getProperty("test.build.data", "build/test/data/") + "/testText";
+  private static final String AVRO_FILENAME = TEST_ROOT_DIR + "/weather.avro";
+
+  /**
+   * Tests whether binary Avro data files are displayed correctly.
+   */
+  @Test
+  public void testDisplayForAvroFiles() throws Exception {
+    // Create a small Avro data file on the local file system.
+    createAvroFile(generateWeatherAvroBinaryData());
+
+    // Prepare and call the Text command's protected getInputStream method
+    // using reflection.
+    Configuration conf = new Configuration();
+    File localPath = new File(AVRO_FILENAME);
+    PathData pathData = new PathData(localPath, conf);
+    Display.Text text = new Display.Text();
+    text.setConf(conf);
+    Method method = text.getClass().getDeclaredMethod(
+      "getInputStream", PathData.class);
+    method.setAccessible(true);
+    InputStream stream = (InputStream) method.invoke(text, pathData);
+    String output = inputStreamToString(stream);
+
+    // Check the output.
+    String expectedOutput =
+      "{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" +
+      System.getProperty("line.separator") +
+      "{\"station\":\"011990-99999\",\"time\":-619506000000,\"temp\":22}" +
+      System.getProperty("line.separator") +
+      "{\"station\":\"011990-99999\",\"time\":-619484400000,\"temp\":-11}" +
+      System.getProperty("line.separator") +
+      "{\"station\":\"012650-99999\",\"time\":-655531200000,\"temp\":111}" +
+      System.getProperty("line.separator") +
+      "{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}" +
+      System.getProperty("line.separator");
+
+    assertEquals(expectedOutput, output);
+  }
+
+  private String inputStreamToString(InputStream stream) throws IOException {
+    StringWriter writer = new StringWriter();
+    IOUtils.copy(stream, writer);
+    return writer.toString();
+  }
+
+  private void createAvroFile(byte[] contents) throws IOException {
+    (new File(TEST_ROOT_DIR)).mkdir();
+    File file = new File(AVRO_FILENAME);
+    file.createNewFile();
+    FileOutputStream stream = new FileOutputStream(file);
+    stream.write(contents);
+    stream.close();
+  }
+
+  private byte[] generateWeatherAvroBinaryData() {
+    // The contents of a simple binary Avro file with weather records.
+    byte[] contents = {
+      (byte) 0x4f, (byte) 0x62, (byte) 0x6a, (byte)  0x1,
+      (byte)  0x4, (byte) 0x14, (byte) 0x61, (byte) 0x76,
+      (byte) 0x72, (byte) 0x6f, (byte) 0x2e, (byte) 0x63,
+      (byte) 0x6f, (byte) 0x64, (byte) 0x65, (byte) 0x63,
+      (byte)  0x8, (byte) 0x6e, (byte) 0x75, (byte) 0x6c,
+      (byte) 0x6c, (byte) 0x16, (byte) 0x61, (byte) 0x76,
+      (byte) 0x72, (byte) 0x6f, (byte) 0x2e, (byte) 0x73,
+      (byte) 0x63, (byte) 0x68, (byte) 0x65, (byte) 0x6d,
+      (byte) 0x61, (byte) 0xf2, (byte)  0x2, (byte) 0x7b,
+      (byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x72, (byte) 0x65, (byte) 0x63, (byte) 0x6f,
+      (byte) 0x72, (byte) 0x64, (byte) 0x22, (byte) 0x2c,
+      (byte) 0x22, (byte) 0x6e, (byte) 0x61, (byte) 0x6d,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x57, (byte) 0x65, (byte) 0x61, (byte) 0x74,
+      (byte) 0x68, (byte) 0x65, (byte) 0x72, (byte) 0x22,
+      (byte) 0x2c, (byte) 0x22, (byte) 0x6e, (byte) 0x61,
+      (byte) 0x6d, (byte) 0x65, (byte) 0x73, (byte) 0x70,
+      (byte) 0x61, (byte) 0x63, (byte) 0x65, (byte) 0x22,
+      (byte) 0x3a, (byte) 0x22, (byte) 0x74, (byte) 0x65,
+      (byte) 0x73, (byte) 0x74, (byte) 0x22, (byte) 0x2c,
+      (byte) 0x22, (byte) 0x66, (byte) 0x69, (byte) 0x65,
+      (byte) 0x6c, (byte) 0x64, (byte) 0x73, (byte) 0x22,
+      (byte) 0x3a, (byte) 0x5b, (byte) 0x7b, (byte) 0x22,
+      (byte) 0x6e, (byte) 0x61, (byte) 0x6d, (byte) 0x65,
+      (byte) 0x22, (byte) 0x3a, (byte) 0x22, (byte) 0x73,
+      (byte) 0x74, (byte) 0x61, (byte) 0x74, (byte) 0x69,
+      (byte) 0x6f, (byte) 0x6e, (byte) 0x22, (byte) 0x2c,
+      (byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x73, (byte) 0x74, (byte) 0x72, (byte) 0x69,
+      (byte) 0x6e, (byte) 0x67, (byte) 0x22, (byte) 0x7d,
+      (byte) 0x2c, (byte) 0x7b, (byte) 0x22, (byte) 0x6e,
+      (byte) 0x61, (byte) 0x6d, (byte) 0x65, (byte) 0x22,
+      (byte) 0x3a, (byte) 0x22, (byte) 0x74, (byte) 0x69,
+      (byte) 0x6d, (byte) 0x65, (byte) 0x22, (byte) 0x2c,
+      (byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x6c, (byte) 0x6f, (byte) 0x6e, (byte) 0x67,
+      (byte) 0x22, (byte) 0x7d, (byte) 0x2c, (byte) 0x7b,
+      (byte) 0x22, (byte) 0x6e, (byte) 0x61, (byte) 0x6d,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x74, (byte) 0x65, (byte) 0x6d, (byte) 0x70,
+      (byte) 0x22, (byte) 0x2c, (byte) 0x22, (byte) 0x74,
+      (byte) 0x79, (byte) 0x70, (byte) 0x65, (byte) 0x22,
+      (byte) 0x3a, (byte) 0x22, (byte) 0x69, (byte) 0x6e,
+      (byte) 0x74, (byte) 0x22, (byte) 0x7d, (byte) 0x5d,
+      (byte) 0x2c, (byte) 0x22, (byte) 0x64, (byte) 0x6f,
+      (byte) 0x63, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x41, (byte) 0x20, (byte) 0x77, (byte) 0x65,
+      (byte) 0x61, (byte) 0x74, (byte) 0x68, (byte) 0x65,
+      (byte) 0x72, (byte) 0x20, (byte) 0x72, (byte) 0x65,
+      (byte) 0x61, (byte) 0x64, (byte) 0x69, (byte) 0x6e,
+      (byte) 0x67, (byte) 0x2e, (byte) 0x22, (byte) 0x7d,
+      (byte)  0x0, (byte) 0xb0, (byte) 0x81, (byte) 0xb3,
+      (byte) 0xc4, (byte)  0xa, (byte)  0xc, (byte) 0xf6,
+      (byte) 0x62, (byte) 0xfa, (byte) 0xc9, (byte) 0x38,
+      (byte) 0xfd, (byte) 0x7e, (byte) 0x52, (byte)  0x0,
+      (byte) 0xa7, (byte)  0xa, (byte) 0xcc, (byte)  0x1,
+      (byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
+      (byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
+      (byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0xff, (byte) 0xa3, (byte) 0x90,
+      (byte) 0xe8, (byte) 0x87, (byte) 0x24, (byte)  0x0,
+      (byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
+      (byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
+      (byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0xff, (byte) 0x81, (byte) 0xfb,
+      (byte) 0xd6, (byte) 0x87, (byte) 0x24, (byte) 0x2c,
+      (byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
+      (byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
+      (byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0xff, (byte) 0xa5, (byte) 0xae,
+      (byte) 0xc2, (byte) 0x87, (byte) 0x24, (byte) 0x15,
+      (byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x32,
+      (byte) 0x36, (byte) 0x35, (byte) 0x30, (byte) 0x2d,
+      (byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0xff, (byte) 0xb7, (byte) 0xa2,
+      (byte) 0x8b, (byte) 0x94, (byte) 0x26, (byte) 0xde,
+      (byte)  0x1, (byte) 0x18, (byte) 0x30, (byte) 0x31,
+      (byte) 0x32, (byte) 0x36, (byte) 0x35, (byte) 0x30,
+      (byte) 0x2d, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0x39, (byte) 0xff, (byte) 0xdb,
+      (byte) 0xd5, (byte) 0xf6, (byte) 0x93, (byte) 0x26,
+      (byte) 0x9c, (byte)  0x1, (byte) 0xb0, (byte) 0x81,
+      (byte) 0xb3, (byte) 0xc4, (byte)  0xa, (byte)  0xc,
+      (byte) 0xf6, (byte) 0x62, (byte) 0xfa, (byte) 0xc9,
+      (byte) 0x38, (byte) 0xfd, (byte) 0x7e, (byte) 0x52,
+      (byte)  0x0, (byte) 0xa7,
+    };
+
+    return contents;
+  }
+}
+
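
The hex dump above is a complete Avro object container file: the magic "Obj\1", a metadata map carrying avro.codec=null and the test.Weather record schema (visible as JSON in the bytes), a 16-byte sync marker, and one data block of five records. As a sketch only — not part of the patch — here is how an equivalent file could be produced with the Avro Java API, assuming org.apache.avro is on the classpath; the record values are illustrative, and the output cannot be byte-identical to the array above because DataFileWriter picks a random sync marker.

// Sketch: regenerating an equivalent weather container file.
// The schema string is the one embedded in the header above.
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.avro.Schema;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;

public class WeatherFileSketch {
  private static final String SCHEMA_JSON =
      "{\"type\":\"record\",\"name\":\"Weather\",\"namespace\":\"test\","
      + "\"fields\":[{\"name\":\"station\",\"type\":\"string\"},"
      + "{\"name\":\"time\",\"type\":\"long\"},"
      + "{\"name\":\"temp\",\"type\":\"int\"}],"
      + "\"doc\":\"A weather reading.\"}";

  public static byte[] generate() throws IOException {
    Schema schema = new Schema.Parser().parse(SCHEMA_JSON);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    DataFileWriter<GenericRecord> writer =
        new DataFileWriter<GenericRecord>(
            new GenericDatumWriter<GenericRecord>(schema));
    writer.create(schema, out);         // writes "Obj\1", metadata, sync marker
    GenericRecord rec = new GenericData.Record(schema);
    rec.put("station", "011990-99999"); // values here are illustrative
    rec.put("time", 0L);
    rec.put("temp", 22);
    writer.append(rec);
    writer.close();                     // flushes the data block
    return out.toByteArray();
  }
}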

+ 23 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java

@@ -35,6 +35,7 @@ import javax.servlet.http.HttpServletRequest;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
 public class TestServletFilter extends HttpServerFunctionalTest {
@@ -163,7 +164,7 @@ public class TestServletFilter extends HttpServerFunctionalTest {
   @Test
   public void testServletFilterWhenInitThrowsException() throws Exception {
     Configuration conf = new Configuration();
-    // start a http server with CountingFilter
+    // start an HTTP server with ErrorFilter
     conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
         ErrorFilter.Initializer.class.getName());
     HttpServer http = createTestServer(conf);
@@ -174,4 +175,25 @@ public class TestServletFilter extends HttpServerFunctionalTest {
       assertTrue( e.getMessage().contains("Problem in starting http server. Server handlers failed"));
     }
   }
+  
+  /**
+   * Similar to the above test case, except that it uses a different API to add the
+   * filter. Regression test for HADOOP-8786.
+   */
+  @Test
+  public void testContextSpecificServletFilterWhenInitThrowsException()
+      throws Exception {
+    Configuration conf = new Configuration();
+    HttpServer http = createTestServer(conf);
+    http.defineFilter(http.webAppContext,
+        "ErrorFilter", ErrorFilter.class.getName(),
+        null, null);
+    try {
+      http.start();
+      fail("expecting exception");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Unable to initialize WebAppContext", e);
+    }
+  }
+
 }

+ 181 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java

@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.test;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.notification.Failure;
+
+public class TestTimedOutTestsListener {
+
+  public static class Deadlock {
+    private CyclicBarrier barrier = new CyclicBarrier(6);
+  
+    public Deadlock() {
+      DeadlockThread[] dThreads = new DeadlockThread[6];
+  
+      Monitor a = new Monitor("a");
+      Monitor b = new Monitor("b");
+      Monitor c = new Monitor("c");
+      dThreads[0] = new DeadlockThread("MThread-1", a, b);
+      dThreads[1] = new DeadlockThread("MThread-2", b, c);
+      dThreads[2] = new DeadlockThread("MThread-3", c, a);
+  
+      Lock d = new ReentrantLock();
+      Lock e = new ReentrantLock();
+      Lock f = new ReentrantLock();
+  
+      dThreads[3] = new DeadlockThread("SThread-4", d, e);
+      dThreads[4] = new DeadlockThread("SThread-5", e, f);
+      dThreads[5] = new DeadlockThread("SThread-6", f, d);
+  
+      // make them daemon threads so that the test will exit
+      for (int i = 0; i < 6; i++) {
+        dThreads[i].setDaemon(true);
+        dThreads[i].start();
+      }
+    }
+  
+    class DeadlockThread extends Thread {
+      private Lock lock1 = null;
+  
+      private Lock lock2 = null;
+  
+      private Monitor mon1 = null;
+  
+      private Monitor mon2 = null;
+  
+      private boolean useSync;
+  
+      DeadlockThread(String name, Lock lock1, Lock lock2) {
+        super(name);
+        this.lock1 = lock1;
+        this.lock2 = lock2;
+        this.useSync = true;
+      }
+  
+      DeadlockThread(String name, Monitor mon1, Monitor mon2) {
+        super(name);
+        this.mon1 = mon1;
+        this.mon2 = mon2;
+        this.useSync = false;
+      }
+  
+      public void run() {
+        if (useSync) {
+          syncLock();
+        } else {
+          monitorLock();
+        }
+      }
+  
+      private void syncLock() {
+        lock1.lock();
+        try {
+          try {
+            barrier.await();
+          } catch (Exception e) {
+          }
+          goSyncDeadlock();
+        } finally {
+          lock1.unlock();
+        }
+      }
+  
+      private void goSyncDeadlock() {
+        try {
+          barrier.await();
+        } catch (Exception e) {
+        }
+        lock2.lock();
+        throw new RuntimeException("should not reach here.");
+      }
+  
+      private void monitorLock() {
+        synchronized (mon1) {
+          try {
+            barrier.await();
+          } catch (Exception e) {
+          }
+          goMonitorDeadlock();
+        }
+      }
+  
+      private void goMonitorDeadlock() {
+        try {
+          barrier.await();
+        } catch (Exception e) {
+        }
+        synchronized (mon2) {
+          throw new RuntimeException(getName() + " should not reach here.");
+        }
+      }
+    }
+  
+    class Monitor {
+      String name;
+  
+      Monitor(String name) {
+        this.name = name;
+      }
+    }
+  
+  }
+
+  @Test(timeout=500)
+  public void testThreadDumpAndDeadlocks() throws Exception {
+    new Deadlock();
+    String s = null;
+    while (true) {
+      s = TimedOutTestsListener.buildDeadlockInfo();
+      if (s != null)
+        break;
+      Thread.sleep(100);
+    }
+    
+    Assert.assertEquals(3, countStringOccurrences(s, "BLOCKED"));
+    
+    Failure failure = new Failure(
+        null, new Exception(TimedOutTestsListener.TEST_TIMED_OUT_PREFIX));
+    StringWriter writer = new StringWriter();
+    new TimedOutTestsListener(new PrintWriter(writer)).testFailure(failure);
+    String out = writer.toString();
+    
+    Assert.assertTrue(out.contains("THREAD DUMP"));
+    Assert.assertTrue(out.contains("DEADLOCKS DETECTED"));
+    
+    System.out.println(out);
+  }
+
+  private int countStringOccurrences(String s, String substr) {
+    int n = 0;
+    int index = 0;
+    while ((index = s.indexOf(substr, index) + 1) != 0) {
+      n++;
+    }
+    return n;
+  }
+
+}
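
The assertion of exactly three BLOCKED threads is deliberate: buildDeadlockInfo() in the listener (added in the next file) relies on ThreadMXBean.findMonitorDeadlockedThreads(), which reports only the three threads deadlocked on object monitors; the three ReentrantLock threads park in the WAITING state and are reported only by the broader findDeadlockedThreads(). A short sketch contrasting the two JDK probes:

// Sketch: the two java.lang.management deadlock probes, for contrast.
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

public class DeadlockProbes {
  public static void main(String[] args) {
    ThreadMXBean bean = ManagementFactory.getThreadMXBean();
    // Object-monitor (synchronized) cycles only -- what the listener uses.
    long[] monitorOnly = bean.findMonitorDeadlockedThreads();
    // Monitor cycles plus ownable synchronizers such as ReentrantLock.
    long[] all = bean.findDeadlockedThreads();
    System.out.println("monitor-deadlocked threads: "
        + (monitorOnly == null ? 0 : monitorOnly.length));
    System.out.println("all deadlocked threads:     "
        + (all == null ? 0 : all.length));
  }
}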

+ 166 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TimedOutTestsListener.java

@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.test;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.lang.management.LockInfo;
+import java.lang.management.ManagementFactory;
+import java.lang.management.MonitorInfo;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Map;
+
+import org.junit.runner.notification.Failure;
+import org.junit.runner.notification.RunListener;
+
+/**
+ * JUnit run listener which prints a full thread dump to System.err
+ * in case a test fails due to a timeout.
+ */
+public class TimedOutTestsListener extends RunListener {
+
+  static final String TEST_TIMED_OUT_PREFIX = "test timed out after";
+  
+  private static String INDENT = "    ";
+
+  private final PrintWriter output;
+  
+  public TimedOutTestsListener() {
+    this.output = new PrintWriter(System.err);
+  }
+  
+  public TimedOutTestsListener(PrintWriter output) {
+    this.output = output;
+  }
+
+  @Override
+  public void testFailure(Failure failure) throws Exception {
+    if (failure != null && failure.getMessage() != null 
+        && failure.getMessage().startsWith(TEST_TIMED_OUT_PREFIX)) {
+      output.println("====> TEST TIMED OUT. PRINTING THREAD DUMP. <====");
+      output.println();
+      DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
+      output.println(String.format("Timestamp: %s", dateFormat.format(new Date())));
+      output.println();
+      output.println(buildThreadDump());
+      
+      String deadlocksInfo = buildDeadlockInfo();
+      if (deadlocksInfo != null) {
+        output.println("====> DEADLOCKS DETECTED <====");
+        output.println();
+        output.println(deadlocksInfo);
+      }
+    }
+  }
+
+  static String buildThreadDump() {
+    StringBuilder dump = new StringBuilder();
+    Map<Thread, StackTraceElement[]> stackTraces = Thread.getAllStackTraces();
+    for (Map.Entry<Thread, StackTraceElement[]> e : stackTraces.entrySet()) {
+      Thread thread = e.getKey();
+      dump.append(String.format(
+          "\"%s\" %s prio=%d tid=%d %s\njava.lang.Thread.State: %s",
+          thread.getName(),
+          (thread.isDaemon() ? "daemon" : ""),
+          thread.getPriority(),
+          thread.getId(),
+          Thread.State.WAITING.equals(thread.getState()) ? 
+              "in Object.wait()" : thread.getState().name().toLowerCase(),
+          Thread.State.WAITING.equals(thread.getState()) ? 
+              "WAITING (on object monitor)" : thread.getState()));
+      for (StackTraceElement stackTraceElement : e.getValue()) {
+        dump.append("\n        at ");
+        dump.append(stackTraceElement);
+      }
+      dump.append("\n");
+    }
+    return dump.toString();
+  }
+  
+  static String buildDeadlockInfo() {
+    ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
+    long[] threadIds = threadBean.findMonitorDeadlockedThreads();
+    if (threadIds != null && threadIds.length > 0) {
+      StringWriter stringWriter = new StringWriter();
+      PrintWriter out = new PrintWriter(stringWriter);
+      
+      ThreadInfo[] infos = threadBean.getThreadInfo(threadIds, true, true);
+      for (ThreadInfo ti : infos) {
+        printThreadInfo(ti, out);
+        printLockInfo(ti.getLockedSynchronizers(), out);
+        out.println();
+      }
+      
+      out.close();
+      return stringWriter.toString();
+    } else {
+      return null;
+    }
+  }
+  
+  private static void printThreadInfo(ThreadInfo ti, PrintWriter out) {
+    // print thread information
+    printThread(ti, out);
+
+    // print stack trace with locks
+    StackTraceElement[] stacktrace = ti.getStackTrace();
+    MonitorInfo[] monitors = ti.getLockedMonitors();
+    for (int i = 0; i < stacktrace.length; i++) {
+      StackTraceElement ste = stacktrace[i];
+      out.println(INDENT + "at " + ste.toString());
+      for (MonitorInfo mi : monitors) {
+        if (mi.getLockedStackDepth() == i) {
+          out.println(INDENT + "  - locked " + mi);
+        }
+      }
+    }
+    out.println();
+  }
+
+  private static void printThread(ThreadInfo ti, PrintWriter out) {
+    out.print("\"" + ti.getThreadName() + "\"" + " Id="
+        + ti.getThreadId() + " in " + ti.getThreadState());
+    if (ti.getLockName() != null) {
+      out.print(" on lock=" + ti.getLockName());
+    }
+    if (ti.isSuspended()) {
+      out.print(" (suspended)");
+    }
+    if (ti.isInNative()) {
+      out.print(" (running in native)");
+    }
+    out.println();
+    if (ti.getLockOwnerName() != null) {
+      out.println(INDENT + " owned by " + ti.getLockOwnerName() + " Id="
+          + ti.getLockOwnerId());
+    }
+  }
+
+  private static void printLockInfo(LockInfo[] locks, PrintWriter out) {
+    out.println(INDENT + "Locked synchronizers: count = " + locks.length);
+    for (LockInfo li : locks) {
+      out.println(INDENT + "  - " + li);
+    }
+    out.println();
+  }
+  
+}
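
Besides the Surefire listener property added to the pom.xml files below, a RunListener can be attached to a programmatic JUnit 4 run. A minimal sketch, where SlowTest is a made-up example whose @Test timeout fires and produces the "test timed out after ..." failure message the listener matches on:

import org.junit.Test;
import org.junit.runner.JUnitCore;

public class TimedOutListenerDemo {
  // Made-up test class; the timeout trips, triggering the listener.
  public static class SlowTest {
    @Test(timeout = 50)
    public void sleeps() throws Exception {
      Thread.sleep(1000);
    }
  }

  public static void main(String[] args) {
    JUnitCore core = new JUnitCore();
    core.addListener(new org.apache.hadoop.test.TimedOutTestsListener());
    core.run(SlowTest.class); // the thread dump goes to System.err
  }
}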

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -301,6 +301,12 @@
             <java.security.krb5.conf>${project.build.directory}/test-classes/krb5.conf</java.security.krb5.conf>
             <kerberos.realm>${kerberos.realm}</kerberos.realm>
           </systemPropertyVariables>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
           <excludes>
             <exclude>**/${test.exclude}.java</exclude>
             <exclude>${test.exclude.pattern}</exclude>

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java

@@ -181,7 +181,7 @@ public abstract class ServerWebApp extends Server implements ServletContextListe
       throw new ServerException(ServerException.ERROR.S13, portKey);
     }
     try {
-      InetAddress add = InetAddress.getByName(hostnameKey);
+      InetAddress add = InetAddress.getByName(host);
       int portNum = Integer.parseInt(port);
       return new InetSocketAddress(add, portNum);
     } catch (UnknownHostException ex) {

+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm

@@ -81,8 +81,3 @@ Hadoop HDFS over HTTP - Documentation Sets ${project.version}
 
   * {{{./UsingHttpTools.html}Using HTTP Tools}}
 
-* Current Limitations
-
-  <<<GETDELEGATIONTOKEN, RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN>>>
-  operations are not supported.
-

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java

@@ -24,8 +24,11 @@ import org.apache.hadoop.lib.server.Server;
 import org.apache.hadoop.test.HTestCase;
 import org.apache.hadoop.test.TestDir;
 import org.apache.hadoop.test.TestDirHelper;
+import org.junit.Assert;
 import org.junit.Test;
 
+import java.net.InetSocketAddress;
+
 public class TestServerWebApp extends HTestCase {
 
   @Test(expected = IllegalArgumentException.class)
@@ -74,4 +77,23 @@ public class TestServerWebApp extends HTestCase {
 
     server.contextInitialized(null);
   }
+
+  @Test
+  @TestDir
+  public void testResolveAuthority() throws Exception {
+    String dir = TestDirHelper.getTestDir().getAbsolutePath();
+    System.setProperty("TestServerWebApp3.home.dir", dir);
+    System.setProperty("TestServerWebApp3.config.dir", dir);
+    System.setProperty("TestServerWebApp3.log.dir", dir);
+    System.setProperty("TestServerWebApp3.temp.dir", dir);
+    System.setProperty("testserverwebapp3.http.hostname", "localhost");
+    System.setProperty("testserverwebapp3.http.port", "14000");
+    ServerWebApp server = new ServerWebApp("TestServerWebApp3") {
+    };
+
+    InetSocketAddress address = server.resolveAuthority();
+    Assert.assertEquals("localhost", address.getHostName());
+    Assert.assertEquals(14000, address.getPort());
+  }
+
 }

+ 33 - 1
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -16,6 +16,10 @@ Trunk (Unreleased)
     HDFS-3601. Add BlockPlacementPolicyWithNodeGroup to support block placement
     with 4-layer network topology.  (Junping Du via szetszwo)
 
+    HDFS-3703. Datanodes are marked stale if a heartbeat is not received
+    within the configured timeout and are selected as the last locations to
+    read from. (Jing Zhao via suresh)
+
   IMPROVEMENTS
 
     HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
@@ -225,11 +229,23 @@ Release 2.0.3-alpha - Unreleased
     (Jaimin D Jetly and Jing Zhao via szetszwo)
 
   IMPROVEMENTS
+  
+    HDFS-3925. Prettify PipelineAck#toString() for printing to a log
+    (Andrew Wang via todd)
+
+    HDFS-3939. NN RPC address cleanup. (eli)
 
   OPTIMIZATIONS
 
   BUG FIXES
 
+    HDFS-3919. MiniDFSCluster:waitClusterUp can hang forever.
+    (Andy Isaacson via eli)
+
+    HDFS-3924. Multi-byte id in HdfsVolumeId. (Andrew Wang via atm)
+
+    HDFS-3936. MiniDFSCluster shutdown races with BlocksMap usage. (eli)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES
@@ -464,6 +480,10 @@ Release 2.0.2-alpha - 2012-09-07
 
     HDFS-3888. Clean up BlockPlacementPolicyDefault.  (Jing Zhao via szetszwo)
 
+    HDFS-3907. Allow multiple users for local block readers. (eli)
+
+    HDFS-3910. DFSTestUtil#waitReplication should timeout. (eli)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log
@@ -750,7 +770,16 @@ Release 2.0.2-alpha - 2012-09-07
 
     HDFS-2757. Cannot read a local block that's being written to when
     using the local read short circuit. (Jean-Daniel Cryans via eli)
-    
+
+    HDFS-3664. BlockManager race when stopping active services.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3928. MiniDFSCluster should reset the first ExitException on shutdown. (eli)
+   
+    HDFS-3938. Remove current limitations from HttpFS docs. (tucu)
+
+    HDFS-3944. Httpfs resolveAuthority() is not resolving host correctly. (tucu)
+ 
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
@@ -770,6 +799,9 @@ Release 2.0.2-alpha - 2012-09-07
     HDFS-3833. TestDFSShell fails on windows due to concurrent file 
     read/write. (Brandon Li via suresh)
 
+    HDFS-3902. TestDatanodeBlockScanner#testBlockCorruptionPolicy is broken.
+    (Andy Isaacson via eli)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -189,6 +189,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <startKdc>${startKdc}</startKdc>
             <kdc.resource.dir>${kdc.resource.dir}</kdc.resource.dir>
           </systemPropertyVariables>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
         </configuration>
       </plugin>
       <plugin>

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
+import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,10 +32,10 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Public
 public class HdfsVolumeId implements VolumeId {
 
-  private final byte id;
+  private final byte[] id;
   private final boolean isValid;
 
-  public HdfsVolumeId(byte id, boolean isValid) {
+  public HdfsVolumeId(byte[] id, boolean isValid) {
     this.id = id;
     this.isValid = isValid;
   }
@@ -69,6 +70,6 @@ public class HdfsVolumeId implements VolumeId {
 
   @Override
   public String toString() {
-    return Byte.toString(id);
+    return Base64.encodeBase64String(id);
   }
 }

+ 6 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -201,7 +202,7 @@ class BlockStorageLocationUtil {
       ArrayList<VolumeId> l = new ArrayList<VolumeId>(b.getLocations().length);
       // Start off all IDs as invalid, fill it in later with results from RPCs
       for (int i = 0; i < b.getLocations().length; i++) {
-        l.add(new HdfsVolumeId((byte)-1, false));
+        l.add(new HdfsVolumeId(null, false));
       }
       blockVolumeIds.put(b, l);
     }
@@ -234,8 +235,8 @@ class BlockStorageLocationUtil {
         }
         // Get the VolumeId by indexing into the list of VolumeIds
         // provided by the datanode
-        HdfsVolumeId id = new HdfsVolumeId(metaVolumeIds.get(volumeIndex)[0],
-            true);
+        byte[] volumeId = metaVolumeIds.get(volumeIndex);
+        HdfsVolumeId id = new HdfsVolumeId(volumeId, true);
         // Find out which index we are in the LocatedBlock's replicas
         LocatedBlock locBlock = extBlockToLocBlock.get(extBlock);
         DatanodeInfo[] dnInfos = locBlock.getLocations();
@@ -255,8 +256,8 @@ class BlockStorageLocationUtil {
         }
         // Place VolumeId at the same index as the DN's index in the list of
         // replicas
-        List<VolumeId> VolumeIds = blockVolumeIds.get(locBlock);
-        VolumeIds.set(index, id);
+        List<VolumeId> volumeIds = blockVolumeIds.get(locBlock);
+        volumeIds.set(index, id);
       }
     }
     return blockVolumeIds;

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -174,6 +174,13 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_DATANODE_SYNCONCLOSE_DEFAULT = false;
   public static final String  DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive";
   public static final int     DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000;
+  
+  // Whether to enable detection of stale datanodes and use of that state
+  public static final String DFS_NAMENODE_CHECK_STALE_DATANODE_KEY = "dfs.namenode.check.stale.datanode";
+  public static final boolean DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT = false;
+  // The time interval after which a datanode is marked as stale
+  public static final String DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY = "dfs.namenode.stale.datanode.interval";
+  public static final long DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT = 30 * 1000; // 30s
 
   // Replication monitoring related keys
   public static final String DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION =

+ 37 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -128,6 +128,43 @@ public class DFSUtil {
           a.isDecommissioned() ? 1 : -1;
       }
     };
+    
+      
+  /**
+   * Comparator for sorting DatanodeInfo[] based on decommissioned/stale states.
+   * Decommissioned/stale nodes are moved to the end of the array when sorting
+   * with this comparator.
+   */ 
+  @InterfaceAudience.Private 
+  public static class DecomStaleComparator implements Comparator<DatanodeInfo> {
+    private long staleInterval;
+
+    /**
+     * Constructor of DecomStaleComparator
+     * 
+     * @param interval
+     *          The time interval for marking datanodes as stale; it is passed
+     *          in from outside, since the interval may be changed dynamically
+     */
+    public DecomStaleComparator(long interval) {
+      this.staleInterval = interval;
+    }
+
+    @Override
+    public int compare(DatanodeInfo a, DatanodeInfo b) {
+      // Decommissioned nodes will still be moved to the end of the list
+      if (a.isDecommissioned()) {
+        return b.isDecommissioned() ? 0 : 1;
+      } else if (b.isDecommissioned()) {
+        return -1;
+      }
+      // Stale nodes will be moved behind the normal nodes
+      boolean aStale = a.isStale(staleInterval);
+      boolean bStale = b.isStale(staleInterval);
+      return aStale == bStale ? 0 : (aStale ? 1 : -1);
+    }
+  }    
+    
   /**
    * Address matcher for matching an address to local address
    */

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java

@@ -105,4 +105,9 @@ public class HdfsConfiguration extends Configuration {
     deprecate("dfs.federation.nameservices", DFSConfigKeys.DFS_NAMESERVICES);
     deprecate("dfs.federation.nameservice.id", DFSConfigKeys.DFS_NAMESERVICE_ID);
   }
+
+  public static void main(String[] args) {
+    init();
+    Configuration.dumpDeprecatedKeys();
+  }
 }

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java

@@ -36,8 +36,6 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
@@ -67,6 +65,8 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.tools.GetUserMappingsProtocolPB;
+import org.apache.hadoop.tools.impl.pb.client.GetUserMappingsProtocolPBClientImpl;
 
 import com.google.common.base.Preconditions;
 
@@ -218,7 +218,7 @@ public class NameNodeProxies {
       throws IOException {
     GetUserMappingsProtocolPB proxy = (GetUserMappingsProtocolPB)
         createNameNodeProxy(address, conf, ugi, GetUserMappingsProtocolPB.class, 0);
-    return new GetUserMappingsProtocolClientSideTranslatorPB(proxy);
+    return new GetUserMappingsProtocolPBClientImpl(proxy);
   }
   
   private static NamenodeProtocol createNNProxyWithNamenodeProtocol(

+ 19 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 
 /** 
  * This class extends the primary identifier of a Datanode with ephemeral
@@ -321,7 +322,24 @@ public class DatanodeInfo extends DatanodeID implements Node {
     }
     return adminState;
   }
-
+ 
+  /**
+   * Check if the datanode is in a stale state: if the namenode has not
+   * received a heartbeat message from the datanode for more than
+   * staleInterval (the default value is
+   * {@link DFSConfigKeys#DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT}),
+   * the datanode is treated as stale.
+   * 
+   * @param staleInterval
+   *          the time interval for marking the node as stale. If the last
+   *          update time is beyond the given time interval, the node will be
+   *          marked as stale.
+   * @return true if the node is stale
+   */
+  public boolean isStale(long staleInterval) {
+    return (Time.now() - lastUpdate) >= staleInterval;
+  }
+  
   /**
    * Sets the admin state of this node.
    */

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java

@@ -30,6 +30,8 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 
+import com.google.protobuf.TextFormat;
+
 /** Pipeline Acknowledgment **/
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -120,6 +122,6 @@ public class PipelineAck {
   
   @Override //Object
   public String toString() {
-    return proto.toString();
+    return TextFormat.shortDebugString(proto);
   }
 }

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -363,11 +363,10 @@ public class BlockManager {
         replicationThread.join(3000);
       }
     } catch (InterruptedException ie) {
-    } finally {
-      if (pendingReplications != null) pendingReplications.stop();
-      blocksMap.close();
-      datanodeManager.close();
     }
+    datanodeManager.close();
+    pendingReplications.stop();
+    blocksMap.close();
   }
 
   /** @return the datanodeManager */

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java

@@ -94,7 +94,7 @@ class BlocksMap {
   }
 
   void close() {
-    blocks = null;
+    // Empty blocks once GSet#clear is implemented (HDFS-3940)
   }
 
   BlockCollection getBlockCollection(Block b) {

+ 35 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -25,6 +25,7 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -132,6 +133,11 @@ public class DatanodeManager {
    */
   private boolean hasClusterEverBeenMultiRack = false;
   
+  /** Whether or not to check for stale datanodes */
+  private volatile boolean checkForStaleNodes;
+  /** The time interval for detecting stale datanodes */
+  private volatile long staleInterval;
+  
   DatanodeManager(final BlockManager blockManager,
       final Namesystem namesystem, final Configuration conf
       ) throws IOException {
@@ -175,6 +181,21 @@ public class DatanodeManager {
         DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
     LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
         + "=" + this.blockInvalidateLimit);
+    // set the value of stale interval based on configuration
+    this.checkForStaleNodes = conf.getBoolean(
+        DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY,
+        DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT);
+    if (this.checkForStaleNodes) {
+      this.staleInterval = conf.getLong(
+          DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
+          DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT);
+      if (this.staleInterval < DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT) {
+        LOG.warn("The given interval for marking stale datanode = "
+            + this.staleInterval + ", which is smaller than the default value "
+            + DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT
+            + ".");
+      }
+    }
   }
 
   private Daemon decommissionthread = null;
@@ -192,7 +213,13 @@ public class DatanodeManager {
   }
 
   void close() {
-    if (decommissionthread != null) decommissionthread.interrupt();
+    if (decommissionthread != null) {
+      decommissionthread.interrupt();
+      try {
+        decommissionthread.join(3000);
+      } catch (InterruptedException e) {
+      }
+    }
     heartbeatManager.close();
   }
 
@@ -225,14 +252,17 @@ public class DatanodeManager {
       if (rName != null)
         client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR + targethost);
     }
+    
+    Comparator<DatanodeInfo> comparator = checkForStaleNodes ? 
+                    new DFSUtil.DecomStaleComparator(staleInterval) : 
+                    DFSUtil.DECOM_COMPARATOR;
     for (LocatedBlock b : locatedblocks) {
       networktopology.pseudoSortByDistance(client, b.getLocations());
-      
-      // Move decommissioned datanodes to the bottom
-      Arrays.sort(b.getLocations(), DFSUtil.DECOM_COMPARATOR);
+      // Move decommissioned/stale datanodes to the bottom
+      Arrays.sort(b.getLocations(), comparator);
     }
   }
-
+  
   CyclicIteration<String, DatanodeDescriptor> getDatanodeCyclicIteration(
       final String firstkey) {
     return new CyclicIteration<String, DatanodeDescriptor>(

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java

@@ -74,6 +74,11 @@ class HeartbeatManager implements DatanodeStatistics {
 
   void close() {
     heartbeatThread.interrupt();
+    try {
+      // This will have no effect if the thread hasn't yet been started.
+      heartbeatThread.join(3000);
+    } catch (InterruptedException e) {
+    }
   }
   
   synchronized int getLiveDatanodeCount() {

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java

@@ -374,7 +374,8 @@ class BlockPoolSliceScanner {
     throttler.setBandwidth(Math.min(bw, MAX_SCAN_RATE));
   }
   
-  private void verifyBlock(ExtendedBlock block) {
+  @VisibleForTesting
+  void verifyBlock(ExtendedBlock block) {
     BlockSender blockSender = null;
 
     /* In case of failure, attempt to read second time to reduce

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java

@@ -172,7 +172,8 @@ public class DataBlockScanner implements Runnable {
     return blockPoolScannerMap.size();
   }
   
-  private synchronized BlockPoolSliceScanner getBPScanner(String bpid) {
+  @VisibleForTesting
+  synchronized BlockPoolSliceScanner getBPScanner(String bpid) {
     return blockPoolScannerMap.get(bpid);
   }
   

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -277,7 +277,7 @@ public class DataNode extends Configured
   private AbstractList<File> dataDirs;
   private Configuration conf;
 
-  private final String userWithLocalPathAccess;
+  private final List<String> usersWithLocalPathAccess;
   private boolean connectToDnViaHostname;
   ReadaheadPool readaheadPool;
   private final boolean getHdfsBlockLocationsEnabled;
@@ -300,8 +300,8 @@ public class DataNode extends Configured
            final SecureResources resources) throws IOException {
     super(conf);
 
-    this.userWithLocalPathAccess =
-        conf.get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
+    this.usersWithLocalPathAccess = Arrays.asList(
+        conf.getTrimmedStrings(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
     this.connectToDnViaHostname = conf.getBoolean(
         DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
         DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
@@ -1012,7 +1012,7 @@ public class DataNode extends Configured
   private void checkBlockLocalPathAccess() throws IOException {
     checkKerberosAuthMethod("getBlockLocalPathInfo()");
     String currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
-    if (!currentUser.equals(this.userWithLocalPathAccess)) {
+    if (!usersWithLocalPathAccess.contains(currentUser)) {
       throw new AccessControlException(
           "Can't continue with getBlockLocalPathInfo() "
               + "authorization. The user " + currentUser

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -24,6 +24,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -1676,10 +1677,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     List<byte[]> blocksVolumeIds = new ArrayList<byte[]>(volumes.volumes.size());
     // List of indexes into the list of VolumeIds, pointing at the VolumeId of
     // the volume that the block is on
-    List<Integer> blocksVolumendexes = new ArrayList<Integer>(blocks.size());
+    List<Integer> blocksVolumeIndexes = new ArrayList<Integer>(blocks.size());
     // Initialize the list of VolumeIds simply by enumerating the volumes
     for (int i = 0; i < volumes.volumes.size(); i++) {
-      blocksVolumeIds.add(new byte[] { (byte) i });
+      blocksVolumeIds.add(ByteBuffer.allocate(4).putInt(i).array());
     }
     // Determine the index of the VolumeId of each block's volume, by comparing 
     // the block's volume against the enumerated volumes
@@ -1700,10 +1701,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       if (!isValid) {
         volumeIndex = Integer.MAX_VALUE;
       }
-      blocksVolumendexes.add(volumeIndex);
+      blocksVolumeIndexes.add(volumeIndex);
     }
     return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[] {}), 
-        blocksVolumeIds, blocksVolumendexes);
+        blocksVolumeIds, blocksVolumeIndexes);
   }
 
   @Override
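
With HDFS-3924 the volume id grows from a single byte to a 4-byte big-endian integer (encoded above with ByteBuffer), and HdfsVolumeId#toString() now renders it via commons-codec Base64. A worked example of the round trip for volume index 1:

import java.nio.ByteBuffer;
import org.apache.commons.codec.binary.Base64;

public class VolumeIdDemo {
  public static void main(String[] args) {
    // Volume index 1 encodes as the big-endian bytes {0, 0, 0, 1} ...
    byte[] id = ByteBuffer.allocate(4).putInt(1).array();
    // ... which HdfsVolumeId#toString() would render as "AAAAAQ==".
    System.out.println(Base64.encodeBase64String(id));
  }
}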

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -1173,6 +1173,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (blocks != null) {
       blockManager.getDatanodeManager().sortLocatedBlocks(
           clientMachine, blocks.getLocatedBlocks());
+      
+      LocatedBlock lastBlock = blocks.getLastLocatedBlock();
+      if (lastBlock != null) {
+        ArrayList<LocatedBlock> lastBlockList = new ArrayList<LocatedBlock>();
+        lastBlockList.add(lastBlock);
+        blockManager.getDatanodeManager().sortLocatedBlocks(
+                              clientMachine, lastBlockList);
+      }
     }
     return blocks;
   }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java

@@ -71,7 +71,7 @@ public class FileChecksumServlets {
         String tokenString = ugi.getTokens().iterator().next().encodeToUrlString();
         dtParam = JspHelper.getDelegationTokenUrlParam(tokenString);
       }
-      String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
+      String addr = nn.getNameNodeAddressHostPortString();
       String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
 
       return new URL(scheme, hostname, port, 

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java

@@ -34,7 +34,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ServletUtil;
 
@@ -74,7 +73,7 @@ public class FileDataServlet extends DfsServlet {
     // Add namenode address to the url params
     NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
         getServletContext());
-    String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
+    String addr = nn.getNameNodeAddressHostPortString();
     String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
     
     return new URL(scheme, hostname, port,

+ 22 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.ServiceFailedException;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Trash;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
@@ -489,9 +488,9 @@ public class NameNode {
         LOG.warn("ServicePlugin " + p + " could not be started", t);
       }
     }
-    LOG.info(getRole() + " up at: " + rpcServer.getRpcAddress());
+    LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
     if (rpcServer.getServiceRpcAddress() != null) {
-      LOG.info(getRole() + " service server is up at: "
+      LOG.info(getRole() + " service RPC up at: "
           + rpcServer.getServiceRpcAddress());
     }
   }
@@ -617,7 +616,7 @@ public class NameNode {
    */
   public void join() {
     try {
-      this.rpcServer.join();
+      rpcServer.join();
     } catch (InterruptedException ie) {
       LOG.info("Caught interrupted exception ", ie);
     }
@@ -665,27 +664,31 @@ public class NameNode {
   }
 
   /**
-   * Returns the address on which the NameNodes is listening to.
-   * @return namenode rpc address
+   * @return NameNode RPC address
    */
   public InetSocketAddress getNameNodeAddress() {
     return rpcServer.getRpcAddress();
   }
-  
+
+  /**
+   * @return NameNode RPC address in "host:port" string form
+   */
+  public String getNameNodeAddressHostPortString() {
+    return NetUtils.getHostPortString(rpcServer.getRpcAddress());
+  }
+
   /**
-   * Returns namenode service rpc address, if set. Otherwise returns
-   * namenode rpc address.
-   * @return namenode service rpc address used by datanodes
+   * @return NameNode service RPC address if configured, the
+   *    NameNode RPC address otherwise
    */
   public InetSocketAddress getServiceRpcAddress() {
-    return rpcServer.getServiceRpcAddress() != null ? rpcServer.getServiceRpcAddress() : rpcServer.getRpcAddress();
+    final InetSocketAddress serviceAddr = rpcServer.getServiceRpcAddress();
+    return serviceAddr == null ? rpcServer.getRpcAddress() : serviceAddr;
   }
 
   /**
-   * Returns the address of the NameNodes http server, 
-   * which is used to access the name-node web UI.
-   * 
-   * @return the http address.
+   * @return NameNode HTTP address, used by the Web UI, image transfer,
+   *    and HTTP-based file system clients like Hftp and WebHDFS
    */
   public InetSocketAddress getHttpAddress() {
     return httpServer.getHttpAddress();
@@ -1171,10 +1174,12 @@ public class NameNode {
           NAMESERVICE_SPECIFIC_KEYS);
     }
     
+    // If the RPC address is set use it to (re-)configure the default FS
     if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
       conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+      LOG.info("Setting " + FS_DEFAULT_NAME_KEY + " to " + defaultUri.toString());
     }
   }
     
@@ -1196,8 +1201,9 @@ public class NameNode {
     try {
       StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
       NameNode namenode = createNameNode(argv, null);
-      if (namenode != null)
+      if (namenode != null) {
         namenode.join();
+      }
     } catch (Throwable e) {
       LOG.fatal("Exception in namenode join", e);
       terminate(1, e);

+ 0 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java

@@ -49,12 +49,9 @@ public class NameNodeHttpServer {
   private final Configuration conf;
   private final NameNode nn;
   
-  private final Log LOG = NameNode.LOG;
   private InetSocketAddress httpAddress;
-  
   private InetSocketAddress bindAddress;
   
-  
   public static final String NAMENODE_ADDRESS_ATTRIBUTE_KEY = "name.node.address";
   public static final String FSIMAGE_ATTRIBUTE_KEY = "name.system.image";
   protected static final String NAMENODE_ATTRIBUTE_KEY = "name.node";
@@ -68,12 +65,6 @@ public class NameNodeHttpServer {
     this.bindAddress = bindAddress;
   }
   
-  private String getDefaultServerPrincipal() throws IOException {
-    return SecurityUtil.getServerPrincipal(
-        conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
-        nn.getNameNodeAddress().getHostName());
-  }
-
   public void start() throws IOException {
     final String infoHost = bindAddress.getHostName();
     int infoPort = bindAddress.getPort();

+ 53 - 36
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -66,7 +66,6 @@ import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshAuthorizationPolicyProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
@@ -74,8 +73,6 @@ import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolPB;
@@ -119,6 +116,9 @@ import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.tools.GetUserMappingsProtocolPB;
+import org.apache.hadoop.tools.impl.pb.service.GetUserMappingsProtocolPBServiceImpl;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocol.GetUserMappingsProtocolService;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.util.VersionUtil;
 
@@ -159,10 +159,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
     int handlerCount = 
       conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY, 
                   DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
-    InetSocketAddress socAddr = nn.getRpcServerAddress(conf);
-		RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
-         ProtobufRpcEngine.class);
-     ClientNamenodeProtocolServerSideTranslatorPB 
+
+    RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
+        ProtobufRpcEngine.class);
+
+    ClientNamenodeProtocolServerSideTranslatorPB 
        clientProtocolServerTranslator = 
          new ClientNamenodeProtocolServerSideTranslatorPB(this);
      BlockingService clientNNPbService = ClientNamenodeProtocol.
@@ -188,8 +189,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
     BlockingService refreshUserMappingService = RefreshUserMappingsProtocolService
         .newReflectiveBlockingService(refreshUserMappingXlator);
 
-    GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator = 
-        new GetUserMappingsProtocolServerSideTranslatorPB(this);
+    GetUserMappingsProtocolPBServiceImpl getUserMappingXlator = 
+        new GetUserMappingsProtocolPBServiceImpl(this);
     BlockingService getUserMappingService = GetUserMappingsProtocolService
         .newReflectiveBlockingService(getUserMappingXlator);
     
@@ -199,22 +200,24 @@ class NameNodeRpcServer implements NamenodeProtocols {
         .newReflectiveBlockingService(haServiceProtocolXlator);
 	  
     WritableRpcEngine.ensureInitialized();
-    
-    InetSocketAddress dnSocketAddr = nn.getServiceRpcServerAddress(conf);
-    if (dnSocketAddr != null) {
+
+    InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
+    if (serviceRpcAddr != null) {
       int serviceHandlerCount =
         conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
                     DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
-      // Add all the RPC protocols that the namenode implements
-      this.serviceRpcServer = new RPC.Builder(conf)
+      serviceRpcServer = new RPC.Builder(conf)
           .setProtocol(
               org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
           .setInstance(clientNNPbService)
-          .setBindAddress(dnSocketAddr.getHostName())
-          .setPort(dnSocketAddr.getPort()).setNumHandlers(serviceHandlerCount)
+          .setBindAddress(serviceRpcAddr.getHostName())
+          .setPort(serviceRpcAddr.getPort())
+          .setNumHandlers(serviceHandlerCount)
           .setVerbose(false)
           .setSecretManager(namesystem.getDelegationTokenSecretManager())
           .build();
+
+      // Add all the RPC protocols that the namenode implements
       DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService,
           serviceRpcServer);
       DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService,
@@ -228,20 +231,26 @@ class NameNodeRpcServer implements NamenodeProtocols {
       DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, 
           getUserMappingService, serviceRpcServer);
   
-      this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress();
+      serviceRPCAddress = serviceRpcServer.getListenerAddress();
       nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
     } else {
       serviceRpcServer = null;
       serviceRPCAddress = null;
     }
-    // Add all the RPC protocols that the namenode implements
-    this.clientRpcServer = new RPC.Builder(conf)
+
+    InetSocketAddress rpcAddr = nn.getRpcServerAddress(conf);
+    clientRpcServer = new RPC.Builder(conf)
         .setProtocol(
             org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
-        .setInstance(clientNNPbService).setBindAddress(socAddr.getHostName())
-        .setPort(socAddr.getPort()).setNumHandlers(handlerCount)
+        .setInstance(clientNNPbService)
+        .setBindAddress(rpcAddr.getHostName())
+        .setPort(rpcAddr.getPort())
+        .setNumHandlers(handlerCount)
         .setVerbose(false)
-        .setSecretManager(namesystem.getDelegationTokenSecretManager()).build();
+        .setSecretManager(namesystem.getDelegationTokenSecretManager())
+        .build();
+
+    // Add all the RPC protocols that the namenode implements
     DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService,
         clientRpcServer);
     DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService,
@@ -259,44 +268,51 @@ class NameNodeRpcServer implements NamenodeProtocols {
     if (serviceAuthEnabled =
           conf.getBoolean(
             CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
-      this.clientRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
-      if (this.serviceRpcServer != null) {
-        this.serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
+      clientRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
+      if (serviceRpcServer != null) {
+        serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
       }
     }
 
     // The rpc-server port can be ephemeral... ensure we have the correct info
-    this.clientRpcAddress = this.clientRpcServer.getListenerAddress(); 
+    clientRpcAddress = clientRpcServer.getListenerAddress();
     nn.setRpcServerAddress(conf, clientRpcAddress);
     
-    this.minimumDataNodeVersion = conf.get(
+    minimumDataNodeVersion = conf.get(
         DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY,
         DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT);
 
     // Set terse exception whose stack trace won't be logged
-    this.clientRpcServer.addTerseExceptions(SafeModeException.class);
+    clientRpcServer.addTerseExceptions(SafeModeException.class);
  }
   
   /**
-   * Actually start serving requests.
+   * Start client and service RPC servers.
    */
   void start() {
-    clientRpcServer.start();  //start RPC server
+    clientRpcServer.start();
     if (serviceRpcServer != null) {
       serviceRpcServer.start();      
     }
   }
   
   /**
-   * Wait until the RPC server has shut down.
+   * Wait until the client RPC server has shut down.
    */
   void join() throws InterruptedException {
-    this.clientRpcServer.join();
+    clientRpcServer.join();
   }
-  
+
+  /**
+   * Stop client and service RPC servers.
+   */
   void stop() {
-    if(clientRpcServer != null) clientRpcServer.stop();
-    if(serviceRpcServer != null) serviceRpcServer.stop();
+    if (clientRpcServer != null) {
+      clientRpcServer.stop();
+    }
+    if (serviceRpcServer != null) {
+      serviceRpcServer.stop();
+    }
   }
   
   InetSocketAddress getServiceRpcAddress() {
@@ -333,8 +349,9 @@ class NameNodeRpcServer implements NamenodeProtocols {
     namesystem.checkOperation(OperationCategory.UNCHECKED);
     verifyRequest(registration);
     LOG.info("Error report from " + registration + ": " + msg);
-    if(errorCode == FATAL)
+    if (errorCode == FATAL) {
       namesystem.releaseBackupNode(registration);
+    }
   }
 
   @Override // NamenodeProtocol

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java

@@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -444,7 +443,7 @@ class NamenodeJspHelper {
       nodeToRedirect = nn.getHttpAddress().getHostName();
       redirectPort = nn.getHttpAddress().getPort();
     }
-    String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
+    String addr = nn.getNameNodeAddressHostPortString();
     String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
     redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":" + redirectPort
         + "/browseDirectory.jsp?namenodeInfoPort="
@@ -615,8 +614,9 @@ class NamenodeJspHelper {
       final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
       dm.fetchDatanodes(live, dead, true);
 
-      InetSocketAddress nnSocketAddress = (InetSocketAddress) context
-          .getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
+      InetSocketAddress nnSocketAddress =
+          (InetSocketAddress)context.getAttribute(
+              NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
       String nnaddr = nnSocketAddress.getAddress().getHostAddress() + ":"
           + nnSocketAddress.getPort();
 

+ 24 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -965,6 +965,30 @@
   <value>${dfs.web.authentication.kerberos.principal}</value>
 </property>
 
+<property>
+  <name>dfs.namenode.check.stale.datanode</name>
+  <value>false</value>
+  <description>
+    Indicates whether to check for "stale" datanodes, i.e., datanodes whose
+    heartbeat messages have not been received by the namenode for more than
+    a specified time interval. If this parameter is set to true, stale
+    datanodes are moved to the end of the target node list for reads, and
+    writes also try to avoid stale nodes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.stale.datanode.interval</name>
+  <value>30000</value>
+  <description>
+    Default time interval for marking a datanode as "stale": if the
+    namenode has not received a heartbeat message from a datanode for more
+    than this interval, the datanode is marked and treated as "stale".
+  </description>
+</property>
+
 <property>
   <name>dfs.namenode.invalidate.work.pct.per.iteration</name>
   <value>0.32f</value>

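These two properties gate the stale-node read/write avoidance exercised by the new TestGetBlocks case later in this diff. A minimal sketch of enabling them programmatically, using the DFSConfigKeys constants the test refers to (the 30-second value simply mirrors the default above):

    Configuration conf = new HdfsConfiguration();
    // Reorder reads away from datanodes whose heartbeats have gone quiet.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
    // A datanode counts as stale after 30 seconds without a heartbeat.
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, 30 * 1000L);
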
+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp

@@ -34,8 +34,7 @@
   HAServiceState nnHAState = nn.getServiceState();
   boolean isActive = (nnHAState == HAServiceState.ACTIVE);
   String namenodeRole = nn.getRole().toString();
-  String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":"
-      + nn.getNameNodeAddress().getPort();
+  String namenodeLabel = nn.getNameNodeAddressHostPortString();
   Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks = 
 	fsn.listCorruptFileBlocks("/", null);
   int corruptFileCount = corruptFileBlocks.size();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp

@@ -34,7 +34,7 @@
   boolean isActive = (nnHAState == HAServiceState.ACTIVE);
   String namenodeRole = nn.getRole().toString();
   String namenodeState = nnHAState.toString();
-  String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
+  String namenodeLabel = nn.getNameNodeAddressHostPortString();
 %>
 
 <!DOCTYPE html>

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp

@@ -33,7 +33,7 @@ String namenodeRole = nn.getRole().toString();
 FSNamesystem fsn = nn.getNamesystem();
 HAServiceState nnHAState = nn.getServiceState();
 boolean isActive = (nnHAState == HAServiceState.ACTIVE);
-String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
+String namenodeLabel = nn.getNameNodeAddressHostPortString();
 %>
 
 <!DOCTYPE html>

+ 27 - 19
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -274,7 +274,7 @@ public class DFSTestUtil {
    * specified target.
    */
   public void waitReplication(FileSystem fs, String topdir, short value) 
-                                              throws IOException {
+      throws IOException, InterruptedException, TimeoutException {
     Path root = new Path(topdir);
 
     /** wait for the replication factor to settle down */
@@ -499,36 +499,44 @@ public class DFSTestUtil {
       return fileNames;
     }
   }
-  
-  /** wait for the file's replication to be done */
-  public static void waitReplication(FileSystem fs, Path fileName, 
-      short replFactor)  throws IOException {
-    boolean good;
+
+  /**
+   * Wait for the given file to reach the given replication factor.
+   * @throws TimeoutException if we fail to sufficiently replicate the file
+   */
+  public static void waitReplication(FileSystem fs, Path fileName, short replFactor)
+      throws IOException, InterruptedException, TimeoutException {
+    boolean correctReplFactor;
+    final int ATTEMPTS = 20;
+    int count = 0;
+
     do {
-      good = true;
+      correctReplFactor = true;
       BlockLocation locs[] = fs.getFileBlockLocations(
         fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
+      count++;
       for (int j = 0; j < locs.length; j++) {
         String[] hostnames = locs[j].getNames();
         if (hostnames.length != replFactor) {
-          String hostNameList = "";
-          for (String h : hostnames) hostNameList += h + " ";
-          System.out.println("Block " + j + " of file " + fileName 
-              + " has replication factor " + hostnames.length + "; locations "
-              + hostNameList);
-          good = false;
-          try {
-            System.out.println("Waiting for replication factor to drain");
-            Thread.sleep(100);
-          } catch (InterruptedException e) {} 
+          correctReplFactor = false;
+          System.out.println("Block " + j + " of file " + fileName
+              + " has replication factor " + hostnames.length
+              + " (desired " + replFactor + "); locations "
+              + Joiner.on(' ').join(hostnames));
+          Thread.sleep(1000);
           break;
         }
       }
-      if (good) {
+      if (correctReplFactor) {
         System.out.println("All blocks of file " + fileName
             + " verified to have replication factor " + replFactor);
       }
-    } while(!good);
+    } while (!correctReplFactor && count < ATTEMPTS);
+
+    if (count == ATTEMPTS) {
+      throw new TimeoutException("Timed out waiting for " + fileName +
+          " to reach " + replFactor + " replicas");
+    }
   }
   
   /** delete directory and everything underneath it.*/

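The rewritten waitReplication replaces an unbounded poll with a bounded one: at most 20 one-second sleeps, then a TimeoutException instead of a hung test. The skeleton of the idiom, with illustrative names (checkCondition() stands in for the block-location inspection):

    final int ATTEMPTS = 20;
    int count = 0;
    boolean done;
    do {
      done = checkCondition();
      if (!done) {
        Thread.sleep(1000);
        count++;
      }
    } while (!done && count < ATTEMPTS);
    if (!done) {
      throw new TimeoutException("Condition not met after " + ATTEMPTS + " attempts");
    }

Testing done rather than the raw attempt count before throwing avoids misreporting a success that arrives on the final attempt.
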
+ 10 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -857,8 +857,8 @@ public class MiniDFSCluster {
     // After the NN has started, set back the bound ports into
     // the conf
     conf.set(DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId), NetUtils
-        .getHostPortString(nn.getNameNodeAddress()));
+        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId),
+        nn.getNameNodeAddressHostPortString());
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId, nnId), NetUtils
         .getHostPortString(nn.getHttpAddress()));
@@ -880,8 +880,8 @@ public class MiniDFSCluster {
    * @return URI of the given namenode in MiniDFSCluster
    */
   public URI getURI(int nnIndex) {
-    InetSocketAddress addr = nameNodes[nnIndex].nameNode.getNameNodeAddress();
-    String hostPort = NetUtils.getHostPortString(addr);
+    String hostPort =
+        nameNodes[nnIndex].nameNode.getNameNodeAddressHostPortString();
     URI uri = null;
     try {
       uri = new URI("hdfs://" + hostPort);
@@ -918,7 +918,8 @@ public class MiniDFSCluster {
   /**
    * wait for the cluster to get out of safemode.
    */
-  public void waitClusterUp() {
+  public void waitClusterUp() throws IOException {
+    int i = 0;
     if (numDataNodes > 0) {
       while (!isClusterUp()) {
         try {
@@ -926,6 +927,9 @@ public class MiniDFSCluster {
           Thread.sleep(1000);
         } catch (InterruptedException e) {
         }
+        if (++i > 10) {
+          throw new IOException("Timed out waiting for Mini HDFS Cluster to start");
+        }
       }
     }
   }
@@ -1354,6 +1358,7 @@ public class MiniDFSCluster {
       if (ExitUtil.terminateCalled()) {
         LOG.fatal("Test resulted in an unexpected exit",
             ExitUtil.getFirstExitException());
+        ExitUtil.resetFirstExitException();
         throw new AssertionError("Test resulted in an unexpected exit");
       }
     }

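The ExitUtil.resetFirstExitException() call is the notable addition in this hunk: ExitUtil records only the first terminate() per JVM (hence getFirstExitException), so without the reset every later test sharing the JVM would fail on the same stale exit. A minimal sketch of the teardown pattern (assuming a JUnit @After hook; the method name is illustrative):

    @After
    public void checkNoUnexpectedExit() {
      if (ExitUtil.terminateCalled()) {
        LOG.fatal("Test resulted in an unexpected exit",
            ExitUtil.getFirstExitException());
        ExitUtil.resetFirstExitException();  // keep the failure from leaking into later tests
        throw new AssertionError("Test resulted in an unexpected exit");
      }
    }
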
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java

@@ -61,7 +61,7 @@ public class TestBlockReaderLocal {
    * of this class might immediately issue a retry on failure, so it's polite.
    */
   @Test
-  public void testStablePositionAfterCorruptRead() throws IOException {
+  public void testStablePositionAfterCorruptRead() throws Exception {
     final short REPL_FACTOR = 1;
     final long FILE_LENGTH = 512L;
     cluster.waitActive();

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java

@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.io.RandomAccessFile;
 import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -199,11 +200,11 @@ public class TestClientReportBadBlock {
   }
 
   /**
-   * create a file with one block and corrupt some/all of the block replicas.
+   * Create a file with one block and corrupt some/all of the block replicas.
    */
   private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
       int corruptBlockCount) throws IOException, AccessControlException,
-      FileNotFoundException, UnresolvedLinkException {
+      FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
     DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
     DFSTestUtil.waitReplication(dfs, filePath, repl);
     // Locate the file blocks by asking name node

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -789,8 +789,7 @@ public class TestDFSClientRetries {
    * way. See HDFS-3067.
    */
   @Test
-  public void testRetryOnChecksumFailure()
-      throws UnresolvedLinkException, IOException {
+  public void testRetryOnChecksumFailure() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
     MiniDFSCluster cluster =
       new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
@@ -831,7 +830,7 @@ public class TestDFSClientRetries {
   }
 
   /** Test client retry with namenode restarting. */
-  @Test
+  @Test(timeout=300000)
   public void testNamenodeRestart() throws Exception {
     namenodeRestartTest(new Configuration(), false);
   }

+ 16 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

@@ -34,14 +34,19 @@ import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.Level;
 import org.junit.Test;
 
 /**
@@ -59,6 +64,10 @@ public class TestDatanodeBlockScanner {
   
   private static Pattern pattern_blockVerify = 
              Pattern.compile(".*?(SCAN_PERIOD)\\s*:\\s*(\\d+.*?)");
+  
+  static {
+    ((Log4JLogger)FSNamesystem.auditLog).getLogger().setLevel(Level.WARN);
+  }
   /**
    * This connects to datanode and fetches block verification data.
    * It repeats this until the given block has a verification time > newTime.
@@ -173,7 +182,7 @@ public class TestDatanodeBlockScanner {
   }
 
   @Test
-  public void testBlockCorruptionPolicy() throws IOException {
+  public void testBlockCorruptionPolicy() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     Random random = new Random();
@@ -206,12 +215,12 @@ public class TestDatanodeBlockScanner {
     assertTrue(MiniDFSCluster.corruptReplica(1, block));
     assertTrue(MiniDFSCluster.corruptReplica(2, block));
 
-    // Read the file to trigger reportBadBlocks by client
-    try {
-      IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), 
-                        conf, true);
-    } catch (IOException e) {
-      // Ignore exception
+    // Trigger each of the DNs to scan this block immediately.
+    // The block pool scanner doesn't run frequently enough on its own
+    // to notice these, and due to HDFS-1371, the client won't report
+    // bad blocks to the NN when all replicas are bad.
+    for (DataNode dn : cluster.getDataNodes()) {
+      DataNodeTestUtils.runBlockScannerForBlock(dn, block);
     }
 
     // We now have the blocks to be marked as corrupt and we get back all

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java

@@ -25,6 +25,7 @@ import static org.junit.Assert.fail;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -100,7 +101,7 @@ public class TestFileStatus {
   }
   
   private void checkFile(FileSystem fileSys, Path name, int repl)
-      throws IOException {
+      throws IOException, InterruptedException, TimeoutException {
     DFSTestUtil.waitReplication(fileSys, name, (short) repl);
   }
   
@@ -129,7 +130,7 @@ public class TestFileStatus {
 
   /** Test the FileStatus obtained calling getFileStatus on a file */  
   @Test
-  public void testGetFileStatusOnFile() throws IOException {
+  public void testGetFileStatusOnFile() throws Exception {
     checkFile(fs, file1, 1);
     // test getFileStatus on a file
     FileStatus status = fs.getFileStatus(file1);

+ 164 - 41
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java

@@ -17,8 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -28,48 +27,178 @@ import java.util.Map;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.util.Time;
 import org.junit.Test;
+
 /**
- * This class tests if block replacement request to data nodes work correctly.
+ * This class tests whether the getBlocks request works correctly.
  */
 public class TestGetBlocks {
+  private static final int blockSize = 8192;
+  private static final String racks[] = new String[] { "/d1/r1", "/d1/r1",
+      "/d1/r2", "/d1/r2", "/d1/r2", "/d2/r3", "/d2/r3" };
+  private static final int numDatanodes = racks.length;
+
+  /**
+   * Stop the heartbeat of a datanode in the MiniDFSCluster
+   * 
+   * @param cluster
+   *          The MiniDFSCluster
+   * @param hostName
+   *          The hostName of the datanode to be stopped
+   * @return The DataNode whose heartbeat has been stopped
+   */
+  private DataNode stopDataNodeHeartbeat(MiniDFSCluster cluster, String hostName) {
+    for (DataNode dn : cluster.getDataNodes()) {
+      if (dn.getDatanodeId().getHostName().equals(hostName)) {
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+        return dn;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Test whether the datanodes returned by
+   * {@link ClientProtocol#getBlockLocations(String, long, long)} are correct
+   * when stale node checking is enabled. Also test the scenario in which
+   * 1) stale node checking is enabled, 2) a write is in progress, and
+   * 3) a datanode becomes stale, all at the same time.
+   * 
+   * @throws Exception
+   */
+  @Test
+  public void testReadSelectNonStaleDatanode() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
+    long staleInterval = 30 * 1000 * 60;
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
+        staleInterval);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numDatanodes).racks(racks).build();
+
+    cluster.waitActive();
+    InetSocketAddress addr = new InetSocketAddress("localhost",
+        cluster.getNameNodePort());
+    DFSClient client = new DFSClient(addr, conf);
+    List<DatanodeDescriptor> nodeInfoList = cluster.getNameNode()
+        .getNamesystem().getBlockManager().getDatanodeManager()
+        .getDatanodeListForReport(DatanodeReportType.LIVE);
+    assertEquals("Unexpected number of datanodes", numDatanodes,
+        nodeInfoList.size());
+    FileSystem fileSys = cluster.getFileSystem();
+    FSDataOutputStream stm = null;
+    try {
+      // do the writing but do not close the FSDataOutputStream
+      // in order to mimic the ongoing writing
+      final Path fileName = new Path("/file1");
+      stm = fileSys.create(
+          fileName,
+          true,
+          fileSys.getConf().getInt(
+              CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+          (short) 3, blockSize);
+      stm.write(new byte[(blockSize * 3) / 2]);
+      // We do not close the stream so that
+      // the writing seems to be still ongoing
+      stm.hflush();
+
+      LocatedBlocks blocks = client.getNamenode().getBlockLocations(
+          fileName.toString(), 0, blockSize);
+      DatanodeInfo[] nodes = blocks.get(0).getLocations();
+      assertEquals(nodes.length, 3);
+      DataNode staleNode = null;
+      DatanodeDescriptor staleNodeInfo = null;
+      // stop the heartbeat of the first node
+      staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName());
+      assertNotNull(staleNode);
+      // set the first node as stale
+      staleNodeInfo = cluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager()
+          .getDatanode(staleNode.getDatanodeId());
+      staleNodeInfo.setLastUpdate(Time.now() - staleInterval - 1);
+
+      LocatedBlocks blocksAfterStale = client.getNamenode().getBlockLocations(
+          fileName.toString(), 0, blockSize);
+      DatanodeInfo[] nodesAfterStale = blocksAfterStale.get(0).getLocations();
+      assertEquals(nodesAfterStale.length, 3);
+      assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName());
+
+      // restart the staleNode's heartbeat
+      DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode, false);
+      // reset the first node as non-stale, so as to avoid two stale nodes
+      staleNodeInfo.setLastUpdate(Time.now());
+
+      LocatedBlock lastBlock = client.getLocatedBlocks(fileName.toString(), 0,
+          Long.MAX_VALUE).getLastLocatedBlock();
+      nodes = lastBlock.getLocations();
+      assertEquals(nodes.length, 3);
+      // stop the heartbeat of the first node for the last block
+      staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName());
+      assertNotNull(staleNode);
+      // set the node as stale
+      cluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager()
+          .getDatanode(staleNode.getDatanodeId())
+          .setLastUpdate(Time.now() - staleInterval - 1);
+
+      LocatedBlock lastBlockAfterStale = client.getLocatedBlocks(
+          fileName.toString(), 0, Long.MAX_VALUE).getLastLocatedBlock();
+      nodesAfterStale = lastBlockAfterStale.getLocations();
+      assertEquals(nodesAfterStale.length, 3);
+      assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName());
+    } finally {
+      if (stm != null) {
+        stm.close();
+      }
+      cluster.shutdown();
+    }
+  }
+
   /** test getBlocks */
   @Test
   public void testGetBlocks() throws Exception {
     final Configuration CONF = new HdfsConfiguration();
 
-    final short REPLICATION_FACTOR = (short)2;
+    final short REPLICATION_FACTOR = (short) 2;
     final int DEFAULT_BLOCK_SIZE = 1024;
     final Random r = new Random();
-    
+
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
-                                               .numDataNodes(REPLICATION_FACTOR)
-                                               .build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
+        REPLICATION_FACTOR).build();
     try {
       cluster.waitActive();
-      
+
       // create a file with two blocks
       FileSystem fs = cluster.getFileSystem();
       FSDataOutputStream out = fs.create(new Path("/tmp.txt"),
           REPLICATION_FACTOR);
-      byte [] data = new byte[1024];
-      long fileLen = 2*DEFAULT_BLOCK_SIZE;
+      byte[] data = new byte[1024];
+      long fileLen = 2 * DEFAULT_BLOCK_SIZE;
       long bytesToWrite = fileLen;
-      while( bytesToWrite > 0 ) {
+      while (bytesToWrite > 0) {
         r.nextBytes(data);
-        int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite;
+        int bytesToWriteNext = (1024 < bytesToWrite) ? 1024
+            : (int) bytesToWrite;
         out.write(data, 0, bytesToWriteNext);
         bytesToWrite -= bytesToWriteNext;
       }
@@ -77,27 +206,28 @@ public class TestGetBlocks {
 
       // get blocks & data nodes
       List<LocatedBlock> locatedBlocks;
-      DatanodeInfo[] dataNodes=null;
+      DatanodeInfo[] dataNodes = null;
       boolean notWritten;
       do {
-        final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);
-        locatedBlocks = dfsclient.getNamenode().
-          getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
+        final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF),
+            CONF);
+        locatedBlocks = dfsclient.getNamenode()
+            .getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
         assertEquals(2, locatedBlocks.size());
         notWritten = false;
-        for(int i=0; i<2; i++) {
+        for (int i = 0; i < 2; i++) {
           dataNodes = locatedBlocks.get(i).getLocations();
-          if(dataNodes.length != REPLICATION_FACTOR) {
+          if (dataNodes.length != REPLICATION_FACTOR) {
             notWritten = true;
             try {
               Thread.sleep(10);
-            } catch(InterruptedException e) {
+            } catch (InterruptedException e) {
             }
             break;
           }
         }
-      } while(notWritten);
-      
+      } while (notWritten);
+
       // get RPC client to namenode
       InetSocketAddress addr = new InetSocketAddress("localhost",
           cluster.getNameNodePort());
@@ -122,7 +252,7 @@ public class TestGetBlocks {
       assertEquals(locs[0].getStorageIDs().length, 2);
 
       // get blocks of size 0 from dataNodes[0]
-      getBlocksWithException(namenode, dataNodes[0], 0);     
+      getBlocksWithException(namenode, dataNodes[0], 0);
 
       // get blocks of size -1 from dataNodes[0]
       getBlocksWithException(namenode, dataNodes[0], -1);
@@ -136,46 +266,39 @@ public class TestGetBlocks {
   }
 
   private void getBlocksWithException(NamenodeProtocol namenode,
-                                      DatanodeInfo datanode,
-                                      long size) throws IOException {
+      DatanodeInfo datanode, long size) throws IOException {
     boolean getException = false;
     try {
-        namenode.getBlocks(DFSTestUtil.getLocalDatanodeInfo(), 2);
-    } catch(RemoteException e) {
+      namenode.getBlocks(DFSTestUtil.getLocalDatanodeInfo(), 2);
+    } catch (RemoteException e) {
       getException = true;
       assertTrue(e.getClassName().contains("HadoopIllegalArgumentException"));
     }
     assertTrue(getException);
   }
- 
+
   @Test
   public void testBlockKey() {
     Map<Block, Long> map = new HashMap<Block, Long>();
     final Random RAN = new Random();
     final long seed = RAN.nextLong();
-    System.out.println("seed=" +  seed);
+    System.out.println("seed=" + seed);
     RAN.setSeed(seed);
 
-    long[] blkids = new long[10]; 
-    for(int i = 0; i < blkids.length; i++) {
+    long[] blkids = new long[10];
+    for (int i = 0; i < blkids.length; i++) {
       blkids[i] = 1000L + RAN.nextInt(100000);
       map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]);
     }
     System.out.println("map=" + map.toString().replace(",", "\n  "));
-    
-    for(int i = 0; i < blkids.length; i++) {
-      Block b = new Block(blkids[i], 0, GenerationStamp.GRANDFATHER_GENERATION_STAMP);
+
+    for (int i = 0; i < blkids.length; i++) {
+      Block b = new Block(blkids[i], 0,
+          GenerationStamp.GRANDFATHER_GENERATION_STAMP);
       Long v = map.get(b);
       System.out.println(b + " => " + v);
       assertEquals(blkids[i], v.longValue());
     }
   }
 
-  /**
-   * @param args
-   */
-  public static void main(String[] args) throws Exception {
-    (new TestGetBlocks()).testGetBlocks();
-  }
-
 }

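The core trick in testReadSelectNonStaleDatanode is that staleness is judged purely from the DatanodeDescriptor's last-update timestamp, so the test can fabricate a stale node without waiting: it disables the datanode's heartbeats and rewinds the descriptor's timestamp past the configured interval. The essential steps, condensed from the test above:

    // Stop real heartbeats so the descriptor's timestamp stays where we put it.
    DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode, true);
    // Rewind lastUpdate past the stale interval; the NN now treats the node as stale.
    staleNodeInfo.setLastUpdate(Time.now() - staleInterval - 1);
    // Expectation: getBlockLocations orders the stale replica last.
    assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName());
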
+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java

@@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
@@ -41,6 +40,7 @@ import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.tools.impl.pb.client.GetUserMappingsProtocolPBClientImpl;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -141,8 +141,8 @@ public class TestIsMethodSupported {
   
   @Test
   public void testGetUserMappingsProtocol() throws IOException {
-    GetUserMappingsProtocolClientSideTranslatorPB translator = 
-        (GetUserMappingsProtocolClientSideTranslatorPB)
+    GetUserMappingsProtocolPBClientImpl translator = 
+        (GetUserMappingsProtocolPBClientImpl)
         NameNodeProxies.createNonHAProxy(conf, nnAddress,
             GetUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(),
             true).getProxy();

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java

@@ -27,6 +27,7 @@ import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
 import java.util.Iterator;
 import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -420,8 +421,8 @@ public class TestReplication {
     }
   }
   
-  private void changeBlockLen(MiniDFSCluster cluster, 
-      int lenDelta) throws IOException, InterruptedException {
+  private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
+      throws IOException, InterruptedException, TimeoutException {
     final Path fileName = new Path("/file1");
     final short REPLICATION_FACTOR = (short)1;
     final FileSystem fs = cluster.getFileSystem();

+ 25 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java

@@ -224,7 +224,8 @@ public class TestShortCircuitLocalRead {
   @Test
   public void testGetBlockLocalPathInfo() throws IOException, InterruptedException {
     final Configuration conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, "alloweduser");
+    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
+        "alloweduser1,alloweduser2");
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
     cluster.waitActive();
@@ -232,8 +233,10 @@ public class TestShortCircuitLocalRead {
     FileSystem fs = cluster.getFileSystem();
     try {
       DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
-      UserGroupInformation aUgi = UserGroupInformation
-          .createRemoteUser("alloweduser");
+      UserGroupInformation aUgi1 =
+          UserGroupInformation.createRemoteUser("alloweduser1");
+      UserGroupInformation aUgi2 =
+          UserGroupInformation.createRemoteUser("alloweduser2");
       LocatedBlocks lb = cluster.getNameNode().getRpcServer()
           .getBlockLocations("/tmp/x", 0, 16);
       // Create a new block object, because the block inside LocatedBlock at
@@ -241,7 +244,7 @@ public class TestShortCircuitLocalRead {
       ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
       Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
       final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
-      ClientDatanodeProtocol proxy = aUgi
+      ClientDatanodeProtocol proxy = aUgi1
           .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
             @Override
             public ClientDatanodeProtocol run() throws Exception {
@@ -250,13 +253,29 @@ public class TestShortCircuitLocalRead {
             }
           });
       
-      //This should succeed
+      // This should succeed
       BlockLocalPathInfo blpi = proxy.getBlockLocalPathInfo(blk, token);
       Assert.assertEquals(
           DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(),
           blpi.getBlockPath());
 
-      // Now try with a not allowed user.
+      // Try with the other allowed user
+      proxy = aUgi2
+          .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
+            @Override
+            public ClientDatanodeProtocol run() throws Exception {
+              return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf,
+                  60000, false);
+            }
+          });
+
+      // This should succeed as well
+      blpi = proxy.getBlockLocalPathInfo(blk, token);
+      Assert.assertEquals(
+          DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(),
+          blpi.getBlockPath());
+
+      // Now try with a disallowed user
       UserGroupInformation bUgi = UserGroupInformation
           .createRemoteUser("notalloweduser");
       proxy = bUgi

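Both allowed users go through the same UserGroupInformation.doAs idiom, which scopes the proxy creation to the given remote user so the datanode's access check sees that user rather than the test runner. A minimal sketch of the pattern as used above (dnInfo and conf come from the enclosing test):

    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alloweduser1");
    ClientDatanodeProtocol proxy = ugi.doAs(
        new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
          @Override
          public ClientDatanodeProtocol run() throws Exception {
            // Runs with ugi's credentials, not the test runner's.
            return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
          }
        });
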
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -88,7 +88,7 @@ public class TestBalancer {
   /* create a file with a length of <code>fileLen</code> */
   static void createFile(MiniDFSCluster cluster, Path filePath, long fileLen,
       short replicationFactor, int nnIndex)
-  throws IOException {
+  throws IOException, InterruptedException, TimeoutException {
     FileSystem fs = cluster.getFileSystem(nnIndex);
     DFSTestUtil.createFile(fs, filePath, fileLen, 
         replicationFactor, r.nextLong());
@@ -100,7 +100,7 @@ public class TestBalancer {
    * whose used space to be <code>size</code>
    */
   private ExtendedBlock[] generateBlocks(Configuration conf, long size,
-      short numNodes) throws IOException {
+      short numNodes) throws IOException, InterruptedException, TimeoutException {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
     try {
       cluster.waitActive();

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java

@@ -23,6 +23,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -96,7 +97,7 @@ public class TestBalancerWithMultipleNameNodes {
 
   /* create a file with a length of <code>fileLen</code> */
   private static void createFile(Suite s, int index, long len
-      ) throws IOException {
+      ) throws IOException, InterruptedException, TimeoutException {
     final FileSystem fs = s.cluster.getFileSystem(index);
     DFSTestUtil.createFile(fs, FILE_PATH, len, s.replication, RANDOM.nextLong());
     DFSTestUtil.waitReplication(fs, FILE_PATH, s.replication);
@@ -106,7 +107,7 @@ public class TestBalancerWithMultipleNameNodes {
    * whose used space to be <code>size</code>
    */
   private static ExtendedBlock[][] generateBlocks(Suite s, long size
-      ) throws IOException {
+      ) throws IOException, InterruptedException, TimeoutException {
     final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
     for(int n = 0; n < s.clients.length; n++) {
       final long fileLen = size/s.replication;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java

@@ -53,7 +53,7 @@ public class TestOverReplicatedBlocks {
    * corrupt ones.
    */
   @Test
-  public void testProcesOverReplicateBlock() throws IOException {
+  public void testProcesOverReplicateBlock() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(
@@ -141,7 +141,7 @@ public class TestOverReplicatedBlocks {
    * send heartbeats. 
    */
   @Test
-  public void testChooseReplicaToDelete() throws IOException {
+  public void testChooseReplicaToDelete() throws Exception {
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
     try {

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java

@@ -114,6 +114,12 @@ public class DataNodeTestUtils {
         dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
   }
   
+  public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
+    DataBlockScanner scanner = dn.getBlockScanner();
+    BlockPoolSliceScanner bpScanner = scanner.getBPScanner(b.getBlockPoolId());
+    bpScanner.verifyBlock(b);
+  }
+  
   public static void shutdownBlockScanner(DataNode dn) {
     if (dn.blockScanner != null) {
       dn.blockScanner.shutdown();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java

@@ -89,7 +89,7 @@ public class TestBlockReplacement {
   }
   
   @Test
-  public void testBlockReplacement() throws IOException, TimeoutException {
+  public void testBlockReplacement() throws Exception {
     final Configuration CONF = new HdfsConfiguration();
     final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
     final String[] NEW_RACKS = {"/RACK2"};

+ 16 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java

@@ -27,6 +27,9 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeoutException;
+
+import junit.framework.Assert;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -65,7 +68,7 @@ import org.mockito.invocation.InvocationOnMock;
 
 /**
  * This test simulates a variety of situations when blocks are being
- * intentionally orrupted, unexpectedly modified, and so on before a block
+ * intentionally corrupted, unexpectedly modified, and so on before a block
  * report is happening
  */
 public class TestBlockReport {
@@ -316,7 +319,7 @@ public class TestBlockReport {
    * @throws IOException in case of an error
    */
   @Test
-  public void blockReport_06() throws IOException {
+  public void blockReport_06() throws Exception {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     final int DN_N1 = DN_N0 + 1;
@@ -353,7 +356,7 @@ public class TestBlockReport {
   @Test
   // Currently this test is failing as expected 'cause the correct behavior is
   // not yet implemented (9/15/09)
-  public void blockReport_07() throws IOException {
+  public void blockReport_07() throws Exception {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     final int DN_N1 = DN_N0 + 1;
@@ -670,21 +673,24 @@ public class TestBlockReport {
   }
 
   private void startDNandWait(Path filePath, boolean waitReplicas) 
-    throws IOException {
-    if(LOG.isDebugEnabled()) {
+      throws IOException, InterruptedException, TimeoutException {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Before next DN start: " + cluster.getDataNodes().size());
     }
     cluster.startDataNodes(conf, 1, true, null, null);
+    cluster.waitClusterUp();
     ArrayList<DataNode> datanodes = cluster.getDataNodes();
     assertEquals(datanodes.size(), 2);
 
-    if(LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       int lastDn = datanodes.size() - 1;
       LOG.debug("New datanode "
           + cluster.getDataNodes().get(lastDn).getDisplayName() 
           + " has been started");
     }
-    if (waitReplicas) DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
+    if (waitReplicas) {
+      DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
+    }
   }
 
   private ArrayList<Block> prepareForRide(final Path filePath,
@@ -836,8 +842,9 @@ public class TestBlockReport {
     public void run() {
       try {
         startDNandWait(filePath, true);
-      } catch (IOException e) {
-        LOG.warn("Shouldn't happen", e);
+      } catch (Exception e) {
+        e.printStackTrace();
+        Assert.fail("Failed to start BlockChecker: " + e);
       }
     }
   }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java

@@ -105,7 +105,7 @@ public class TestDataNodeVolumeFailure {
    * failure if the configuration parameter allows this.
    */
   @Test
-  public void testVolumeFailure() throws IOException {
+  public void testVolumeFailure() throws Exception {
     FileSystem fs = cluster.getFileSystem();
     dataDir = new File(cluster.getDataDirectory());
     System.out.println("Data dir: is " +  dataDir.getPath());

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java

@@ -137,7 +137,7 @@ public class TestDatanodeRestart {
   }
 
   // test recovering unlinked tmp replicas
-  @Test public void testRecoverReplicas() throws IOException {
+  @Test public void testRecoverReplicas() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);

+ 2 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java

@@ -31,9 +31,7 @@ import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.PrintWriter;
 import java.io.RandomAccessFile;
-import java.io.StringWriter;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -1238,10 +1236,8 @@ public class TestEditLog {
         }
       } catch (IOException e) {
       } catch (Throwable t) {
-        StringWriter sw = new StringWriter();
-        t.printStackTrace(new PrintWriter(sw));
-        fail("caught non-IOException throwable with message " +
-            t.getMessage() + "\nstack trace\n" + sw.toString());
+        fail("Caught non-IOException throwable " +
+             StringUtils.stringifyException(t));
       }
     } finally {
       if ((elfos != null) && (elfos.isOpen()))

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java

@@ -116,7 +116,7 @@ public class TestFSEditLogLoader {
    * automatically bumped up to the new minimum upon restart.
    */
   @Test
-  public void testReplicationAdjusted() throws IOException {
+  public void testReplicationAdjusted() throws Exception {
     // start a cluster 
     Configuration conf = new HdfsConfiguration();
     // Replicate and heartbeat fast to shave a few seconds off test

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java

@@ -53,7 +53,7 @@ public class TestProcessCorruptBlocks {
    *      replicas (2) is equal to replication factor (2))
    */
   @Test
-  public void testWhenDecreasingReplication() throws IOException {
+  public void testWhenDecreasingReplication() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -108,7 +108,7 @@ public class TestProcessCorruptBlocks {
    * 
    */
   @Test
-  public void testByAddingAnExtraDataNode() throws IOException {
+  public void testByAddingAnExtraDataNode() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -159,7 +159,7 @@ public class TestProcessCorruptBlocks {
    *      replicas (1) is equal to replication factor (1))
    */
   @Test
-  public void testWithReplicationFactorAsOne() throws IOException {
+  public void testWithReplicationFactorAsOne() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -208,7 +208,7 @@ public class TestProcessCorruptBlocks {
    *    Verify that all replicas are corrupt and 3 replicas are present.
    */
   @Test
-  public void testWithAllCorruptReplicas() throws IOException {
+  public void testWithAllCorruptReplicas() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

@@ -80,7 +80,7 @@ public class TestWebHDFS {
     }
   }
 
-  @Test
+  @Test(timeout=300000)
   public void testLargeFile() throws Exception {
     largeFileTest(200L << 20); //200MB file length
   }
@@ -202,7 +202,7 @@ public class TestWebHDFS {
   }
 
   /** Test client retry with namenode restarting. */
-  @Test
+  @Test(timeout=300000)
   public void testNamenodeRestart() throws Exception {
     ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
     final Configuration conf = WebHdfsTestUtil.createConf();

+ 6 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -138,6 +138,12 @@ Release 2.0.3-alpha - Unreleased
 
   BUG FIXES
 
+    MAPREDUCE-4607. Race condition in ReduceTask completion can result in Task
+    being incorrectly failed. (Bikas Saha via tomwhite)
+
+    MAPREDUCE-4646. Fixed MR framework to correctly send diagnostic
+    information to clients for failed jobs as well. (Jason Lowe via vinodkv)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES

+ 10 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java

@@ -582,17 +582,23 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
       String jobFile =
           remoteJobConfFile == null ? "" : remoteJobConfFile.toString();
 
+      StringBuilder diagsb = new StringBuilder();
+      for (String s : getDiagnostics()) {
+        diagsb.append(s).append("\n");
+      }
+
       if (getState() == JobState.NEW) {
         return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
             appSubmitTime, startTime, finishTime, setupProgress, 0.0f, 0.0f,
-            cleanupProgress, jobFile, amInfos, isUber);
+            cleanupProgress, jobFile, amInfos, isUber, diagsb.toString());
       }
 
       computeProgress();
-      return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
-          appSubmitTime, startTime, finishTime, setupProgress,
+      JobReport report = MRBuilderUtils.newJobReport(jobId, jobName, username,
+          state, appSubmitTime, startTime, finishTime, setupProgress,
           this.mapProgress, this.reduceProgress,
-          cleanupProgress, jobFile, amInfos, isUber);
+          cleanupProgress, jobFile, amInfos, isUber, diagsb.toString());
+      return report;
     } finally {
       readLock.unlock();
     }

+ 33 - 9
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java

@@ -71,6 +71,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
@@ -86,6 +87,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
@@ -120,6 +122,7 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
@@ -128,6 +131,8 @@ import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.RackResolver;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Implementation of TaskAttempt interface.
  */
@@ -404,10 +409,10 @@ public abstract class TaskAttemptImpl implements
          TaskAttemptState.FAILED,
          TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE,
          new TooManyFetchFailureTransition())
-     .addTransition(
-         TaskAttemptState.SUCCEEDED, TaskAttemptState.KILLED,
-         TaskAttemptEventType.TA_KILL,
-         new KilledAfterSuccessTransition())
+      .addTransition(TaskAttemptState.SUCCEEDED,
+          EnumSet.of(TaskAttemptState.SUCCEEDED, TaskAttemptState.KILLED),
+          TaskAttemptEventType.TA_KILL, 
+          new KilledAfterSuccessTransition())
      .addTransition(
          TaskAttemptState.SUCCEEDED, TaskAttemptState.SUCCEEDED,
          TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
@@ -1483,6 +1488,9 @@ public abstract class TaskAttemptImpl implements
     @SuppressWarnings("unchecked")
     @Override
     public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
+      // too many fetch failures can only happen for map tasks
+      Preconditions
+          .checkArgument(taskAttempt.getID().getTaskId().getTaskType() == TaskType.MAP);
       //add to diagnostic
       taskAttempt.addDiagnosticInfo("Too Many fetch failures.Failing the attempt");
       //set the finish time
@@ -1506,15 +1514,30 @@ public abstract class TaskAttemptImpl implements
   }
   
   private static class KilledAfterSuccessTransition implements
-      SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+      MultipleArcTransition<TaskAttemptImpl, TaskAttemptEvent, TaskAttemptState> {
 
     @SuppressWarnings("unchecked")
     @Override
-    public void transition(TaskAttemptImpl taskAttempt, 
+    public TaskAttemptState transition(TaskAttemptImpl taskAttempt, 
         TaskAttemptEvent event) {
-      TaskAttemptKillEvent msgEvent = (TaskAttemptKillEvent) event;
-      //add to diagnostic
-      taskAttempt.addDiagnosticInfo(msgEvent.getMessage());
+      if(taskAttempt.getID().getTaskId().getTaskType() == TaskType.REDUCE) {
+        // After a reduce task has succeeded, its outputs are safe in HDFS.
+        // Logically such a task should not be killed; we only come here when
+        // there is a race condition in the event queue, e.g. some logic sends
+        // a kill request to this attempt when the successful completion event
+        // for this task is already in the event queue, so the kill event is
+        // executed immediately after the attempt is marked successful and
+        // results in this transition being exercised.
+        // Ignore this for reduce tasks.
+        LOG.info("Ignoring killed event for successful reduce task attempt " +
+                  taskAttempt.getID().toString());
+        return TaskAttemptState.SUCCEEDED;
+      }
+      if(event instanceof TaskAttemptKillEvent) {
+        TaskAttemptKillEvent msgEvent = (TaskAttemptKillEvent) event;
+        //add to diagnostic
+        taskAttempt.addDiagnosticInfo(msgEvent.getMessage());
+      }
 
       // not setting a finish time since it was set on success
       assert (taskAttempt.getFinishTime() != 0);
@@ -1528,6 +1551,7 @@ public abstract class TaskAttemptImpl implements
           .getTaskId().getJobId(), tauce));
       taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
           taskAttempt.attemptId, TaskEventType.T_ATTEMPT_KILLED));
+      return TaskAttemptState.KILLED;
     }
   }
 

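Switching KilledAfterSuccessTransition from SingleArcTransition to MultipleArcTransition is what lets the state machine stay in SUCCEEDED for the benign reduce-side race while still moving map attempts to KILLED: a multiple-arc transition returns the post-state instead of having it fixed in the table. The shape of the interface as used here (a sketch; only the branch logic is shown):

    private static class KilledAfterSuccessTransition implements
        MultipleArcTransition<TaskAttemptImpl, TaskAttemptEvent, TaskAttemptState> {
      @Override
      public TaskAttemptState transition(TaskAttemptImpl taskAttempt,
          TaskAttemptEvent event) {
        if (taskAttempt.getID().getTaskId().getTaskType() == TaskType.REDUCE) {
          return TaskAttemptState.SUCCEEDED;  // ignore the late kill; outputs are in HDFS
        }
        // ... map-side diagnostics and cleanup elided ...
        return TaskAttemptState.KILLED;
      }
    }

The table entry correspondingly declares both possible targets with EnumSet.of(TaskAttemptState.SUCCEEDED, TaskAttemptState.KILLED), matching the .addTransition change above.
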
+ 37 - 33
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java

@@ -191,12 +191,12 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
             TaskEventType.T_ADD_SPEC_ATTEMPT))
 
     // Transitions from SUCCEEDED state
-    .addTransition(TaskState.SUCCEEDED, //only possible for map tasks
+    .addTransition(TaskState.SUCCEEDED,
         EnumSet.of(TaskState.SCHEDULED, TaskState.SUCCEEDED, TaskState.FAILED),
-        TaskEventType.T_ATTEMPT_FAILED, new MapRetroactiveFailureTransition())
-    .addTransition(TaskState.SUCCEEDED, //only possible for map tasks
+        TaskEventType.T_ATTEMPT_FAILED, new RetroactiveFailureTransition())
+    .addTransition(TaskState.SUCCEEDED,
         EnumSet.of(TaskState.SCHEDULED, TaskState.SUCCEEDED),
-        TaskEventType.T_ATTEMPT_KILLED, new MapRetroactiveKilledTransition())
+        TaskEventType.T_ATTEMPT_KILLED, new RetroactiveKilledTransition())
     // Ignore-able transitions.
     .addTransition(
         TaskState.SUCCEEDED, TaskState.SUCCEEDED,
@@ -897,7 +897,7 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
     }
   }
 
-  private static class MapRetroactiveFailureTransition
+  private static class RetroactiveFailureTransition
       extends AttemptFailedTransition {
 
     @Override
@@ -911,8 +911,8 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
           return TaskState.SUCCEEDED;
         }
       }
-      
-      //verify that this occurs only for map task
+
+      // a successful REDUCE task should not be overridden
       //TODO: consider moving it to MapTaskImpl
       if (!TaskType.MAP.equals(task.getType())) {
         LOG.error("Unexpected event for REDUCE task " + event.getType());
@@ -938,42 +938,46 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
     }
   }
 
-  private static class MapRetroactiveKilledTransition implements
+  private static class RetroactiveKilledTransition implements
     MultipleArcTransition<TaskImpl, TaskEvent, TaskState> {
 
     @Override
     public TaskState transition(TaskImpl task, TaskEvent event) {
-      // verify that this occurs only for map task
+      TaskAttemptId attemptId = null;
+      if (event instanceof TaskTAttemptEvent) {
+        TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
+        attemptId = castEvent.getTaskAttemptID(); 
+        if (task.getState() == TaskState.SUCCEEDED &&
+            !attemptId.equals(task.successfulAttempt)) {
+          // don't allow a different task attempt to override a previous
+          // succeeded state
+          return TaskState.SUCCEEDED;
+        }
+      }
+
+      // a successful REDUCE task should not be overridden
       // TODO: consider moving it to MapTaskImpl
       if (!TaskType.MAP.equals(task.getType())) {
         LOG.error("Unexpected event for REDUCE task " + event.getType());
         task.internalError(event.getType());
       }
 
-      TaskTAttemptEvent attemptEvent = (TaskTAttemptEvent) event;
-      TaskAttemptId attemptId = attemptEvent.getTaskAttemptID();
-      if(task.successfulAttempt == attemptId) {
-        // successful attempt is now killed. reschedule
-        // tell the job about the rescheduling
-        unSucceed(task);
-        task.handleTaskAttemptCompletion(
-            attemptId, 
-            TaskAttemptCompletionEventStatus.KILLED);
-        task.eventHandler.handle(new JobMapTaskRescheduledEvent(task.taskId));
-        // typically we are here because this map task was run on a bad node and 
-        // we want to reschedule it on a different node.
-        // Depending on whether there are previous failed attempts or not this 
-        // can SCHEDULE or RESCHEDULE the container allocate request. If this
-        // SCHEDULE's then the dataLocal hosts of this taskAttempt will be used
-        // from the map splitInfo. So the bad node might be sent as a location 
-        // to the RM. But the RM would ignore that just like it would ignore 
-        // currently pending container requests affinitized to bad nodes.
-        task.addAndScheduleAttempt();
-        return TaskState.SCHEDULED;
-      } else {
-        // nothing to do
-        return TaskState.SUCCEEDED;
-      }
+      // successful attempt is now killed. reschedule
+      // tell the job about the rescheduling
+      unSucceed(task);
+      task.handleTaskAttemptCompletion(attemptId,
+          TaskAttemptCompletionEventStatus.KILLED);
+      task.eventHandler.handle(new JobMapTaskRescheduledEvent(task.taskId));
+      // Typically we are here because this map task was run on a bad node
+      // and we want to reschedule it on a different node. Depending on
+      // whether there are previous failed attempts, this can SCHEDULE or
+      // RESCHEDULE the container allocate request. If it SCHEDULEs, the
+      // dataLocal hosts of this taskAttempt (from the map splitInfo) will be
+      // used, so the bad node might be sent as a location to the RM. But the
+      // RM would ignore that, just as it ignores currently pending container
+      // requests affinitized to bad nodes.
+      task.addAndScheduleAttempt();
+      return TaskState.SCHEDULED;
     }
   }
 

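Two details of the rewritten transition are easy to miss. First, the old code compared the successful attempt with == (reference equality); the new guard uses equals(), so logically identical TaskAttemptId records match even when they are distinct objects. Second, the SUCCEEDED check runs before anything else, so a retroactive kill reported for any attempt other than the successful one leaves the task untouched. The guard distilled to its essentials (a sketch, with the surrounding method omitted):

    // Only the attempt that produced the SUCCEEDED state may retroactively
    // un-succeed the task; kills for any other attempt are ignored.
    if (event instanceof TaskTAttemptEvent) {
      TaskAttemptId attemptId = ((TaskTAttemptEvent) event).getTaskAttemptID();
      if (task.getState() == TaskState.SUCCEEDED
          && !attemptId.equals(task.successfulAttempt)) {  // equals(), not ==
        return TaskState.SUCCEEDED;  // nothing to do
      }
    }
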
+ 28 - 10
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java

@@ -180,7 +180,7 @@ public class TestMRApp {
   @Test
   public void testUpdatedNodes() throws Exception {
     int runCount = 0;
-    MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(),
+    MRApp app = new MRAppWithHistory(2, 2, false, this.getClass().getName(),
         true, ++runCount);
     Configuration conf = new Configuration();
     // after half of the map completion, reduce will start
@@ -189,7 +189,7 @@ public class TestMRApp {
     conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
     Job job = app.submit(conf);
     app.waitForState(job, JobState.RUNNING);
-    Assert.assertEquals("Num tasks not correct", 3, job.getTasks().size());
+    Assert.assertEquals("Num tasks not correct", 4, job.getTasks().size());
     Iterator<Task> it = job.getTasks().values().iterator();
     Task mapTask1 = it.next();
     Task mapTask2 = it.next();
@@ -272,18 +272,19 @@ public class TestMRApp {
 
     // rerun
     // in rerun the 1st map will be recovered from previous run
-    app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false,
+    app = new MRAppWithHistory(2, 2, false, this.getClass().getName(), false,
         ++runCount);
     conf = new Configuration();
     conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
     conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
     job = app.submit(conf);
     app.waitForState(job, JobState.RUNNING);
-    Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
+    Assert.assertEquals("No of tasks not correct", 4, job.getTasks().size());
     it = job.getTasks().values().iterator();
     mapTask1 = it.next();
     mapTask2 = it.next();
-    Task reduceTask = it.next();
+    Task reduceTask1 = it.next();
+    Task reduceTask2 = it.next();
 
     // map 1 will be recovered, no need to send done
     app.waitForState(mapTask1, TaskState.SUCCEEDED);
@@ -306,19 +307,36 @@ public class TestMRApp {
     Assert.assertEquals("Expecting 1 more completion events for success", 3,
         events.length);
 
-    app.waitForState(reduceTask, TaskState.RUNNING);
-    TaskAttempt task3Attempt = reduceTask.getAttempts().values().iterator()
+    app.waitForState(reduceTask1, TaskState.RUNNING);
+    app.waitForState(reduceTask2, TaskState.RUNNING);
+
+    TaskAttempt task3Attempt = reduceTask1.getAttempts().values().iterator()
         .next();
     app.getContext()
         .getEventHandler()
         .handle(
             new TaskAttemptEvent(task3Attempt.getID(),
                 TaskAttemptEventType.TA_DONE));
-    app.waitForState(reduceTask, TaskState.SUCCEEDED);
+    app.waitForState(reduceTask1, TaskState.SUCCEEDED);
+    app.getContext()
+        .getEventHandler()
+        .handle(
+            new TaskAttemptEvent(task3Attempt.getID(),
+                TaskAttemptEventType.TA_KILL));
+    app.waitForState(reduceTask1, TaskState.SUCCEEDED);
+
+    TaskAttempt task4Attempt = reduceTask2.getAttempts().values().iterator()
+        .next();
+    app.getContext()
+        .getEventHandler()
+        .handle(
+            new TaskAttemptEvent(task4Attempt.getID(),
+                TaskAttemptEventType.TA_DONE));
+    app.waitForState(reduceTask2, TaskState.SUCCEEDED);
 
     events = job.getTaskAttemptCompletionEvents(0, 100);
-    Assert.assertEquals("Expecting 1 more completion events for success", 4,
-        events.length);
+    Assert.assertEquals("Expecting 2 more completion events for reduce success",
+        5, events.length);
 
     // job succeeds
     app.waitForState(job, JobState.SUCCEEDED);

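The test now starts two reduces (hence 4 tasks instead of 3) and, once the first reduce has succeeded, deliberately sends its attempt a TA_KILL and asserts the task stays SUCCEEDED, exercising the RetroactiveKilledTransition guard above. The repeated getContext().getEventHandler().handle(...) chains could be collapsed with a small helper; a hypothetical sketch (sendAttemptEvent is not part of the patch):

    private static void sendAttemptEvent(MRApp app, TaskAttempt attempt,
        TaskAttemptEventType type) {
      app.getContext().getEventHandler()
          .handle(new TaskAttemptEvent(attempt.getID(), type));
    }

    // e.g. sendAttemptEvent(app, task3Attempt, TaskAttemptEventType.TA_KILL);
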
+ 7 - 7
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java

@@ -138,7 +138,7 @@ public class TestRMContainerAllocator {
     Job mockJob = mock(Job.class);
     when(mockJob.getReport()).thenReturn(
         MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, 
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
     MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
         appAttemptId, mockJob);
 
@@ -215,7 +215,7 @@ public class TestRMContainerAllocator {
     Job mockJob = mock(Job.class);
     when(mockJob.getReport()).thenReturn(
         MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
     MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
         appAttemptId, mockJob);
 
@@ -281,7 +281,7 @@ public class TestRMContainerAllocator {
     Job mockJob = mock(Job.class);
     when(mockJob.getReport()).thenReturn(
         MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
     MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
         appAttemptId, mockJob);
 
@@ -723,7 +723,7 @@ public class TestRMContainerAllocator {
     Job mockJob = mock(Job.class);
     when(mockJob.getReport()).thenReturn(
         MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
     MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
         appAttemptId, mockJob);
 
@@ -827,7 +827,7 @@ public class TestRMContainerAllocator {
     Job mockJob = mock(Job.class);
     when(mockJob.getReport()).thenReturn(
         MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
     MyContainerAllocator allocator =
         new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
 
@@ -993,7 +993,7 @@ public class TestRMContainerAllocator {
     Job mockJob = mock(Job.class);
     when(mockJob.getReport()).thenReturn(
         MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
     MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
         appAttemptId, mockJob);
 
@@ -1445,7 +1445,7 @@ public class TestRMContainerAllocator {
     Job job = mock(Job.class);
     when(job.getReport()).thenReturn(
         MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
     doReturn(10).when(job).getTotalMaps();
     doReturn(10).when(job).getTotalReduces();
     doReturn(0).when(job).getCompletedMaps();

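Every hunk in this file is the same mechanical edit: newJobReport() gained a trailing diagnostics parameter and each mock passes an empty string. A hypothetical test helper (not in the patch) would have confined the signature change to a single call site:

    private static JobReport newRunningReport(JobId jobId) {
      return MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
          0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, "");
    }
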
+ 40 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java

@@ -45,11 +45,14 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.InitTransition;
 import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.JobNoTasksCompletedTransition;
 import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.SystemClock;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.util.Records;
@@ -172,6 +175,8 @@ public class TestJobImpl {
     t.testCheckJobCompleteSuccess();
     t.testCheckJobCompleteSuccessFailed();
     t.testCheckAccess();
+    t.testReportDiagnostics();
+    t.testUberDecision();
   }
 
   @Test
@@ -241,6 +246,41 @@ public class TestJobImpl {
     Assert.assertTrue(job5.checkAccess(ugi1, null));
     Assert.assertTrue(job5.checkAccess(ugi2, null));
   }
+
+  @Test
+  public void testReportDiagnostics() throws Exception {
+    JobID jobID = JobID.forName("job_1234567890000_0001");
+    JobId jobId = TypeConverter.toYarn(jobID);
+    final String diagMsg = "some diagnostic message";
+    final JobDiagnosticsUpdateEvent diagUpdateEvent =
+        new JobDiagnosticsUpdateEvent(jobId, diagMsg);
+    MRAppMetrics mrAppMetrics = MRAppMetrics.create();
+    JobImpl job = new JobImpl(jobId, Records
+        .newRecord(ApplicationAttemptId.class), new Configuration(),
+        mock(EventHandler.class),
+        null, mock(JobTokenSecretManager.class), null,
+        new SystemClock(), null,
+        mrAppMetrics, mock(OutputCommitter.class),
+        true, null, 0, null, null);
+    job.handle(diagUpdateEvent);
+    String diagnostics = job.getReport().getDiagnostics();
+    Assert.assertNotNull(diagnostics);
+    Assert.assertTrue(diagnostics.contains(diagMsg));
+
+    job = new JobImpl(jobId, Records
+        .newRecord(ApplicationAttemptId.class), new Configuration(),
+        mock(EventHandler.class),
+        null, mock(JobTokenSecretManager.class), null,
+        new SystemClock(), null,
+        mrAppMetrics, mock(OutputCommitter.class),
+        true, null, 0, null, null);
+    job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
+    job.handle(diagUpdateEvent);
+    diagnostics = job.getReport().getDiagnostics();
+    Assert.assertNotNull(diagnostics);
+    Assert.assertTrue(diagnostics.contains(diagMsg));
+  }
+
   @Test
   public void testUberDecision() throws Exception {
 

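testReportDiagnostics asserts the same property twice: once on a live job and once after a JOB_KILL, so diagnostics must survive into terminal states. A plausible sketch of the accumulation the test relies on (an assumption about JobImpl internals, including the getDiagnosticUpdate() accessor name; not code from this diff):

    private final StringBuilder diagnostics = new StringBuilder();

    void handleDiagnosticsUpdate(JobDiagnosticsUpdateEvent event) {
      // append rather than replace, so repeated updates accumulate
      diagnostics.append(event.getDiagnosticUpdate()).append('\n');
    }

getReport() would then copy diagnostics.toString() into the JobReport, which is why getReport().getDiagnostics() contains diagMsg in both cases.
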
+ 54 - 15
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java

@@ -84,7 +84,6 @@ public class TestTaskImpl {
   private ApplicationId appId;
   private TaskSplitMetaInfo taskSplitMetaInfo;  
   private String[] dataLocations = new String[0]; 
-  private final TaskType taskType = TaskType.MAP;
   private AppContext appContext;
   
   private int startCount = 0;
@@ -97,6 +96,7 @@ public class TestTaskImpl {
   private class MockTaskImpl extends TaskImpl {
         
     private int taskAttemptCounter = 0;
+    TaskType taskType;
 
     public MockTaskImpl(JobId jobId, int partition,
         EventHandler eventHandler, Path remoteJobConfFile, JobConf conf,
@@ -104,11 +104,12 @@ public class TestTaskImpl {
         Token<JobTokenIdentifier> jobToken,
         Credentials credentials, Clock clock,
         Map<TaskId, TaskInfo> completedTasksFromPreviousRun, int startCount,
-        MRAppMetrics metrics, AppContext appContext) {
+        MRAppMetrics metrics, AppContext appContext, TaskType taskType) {
       super(jobId, taskType , partition, eventHandler,
           remoteJobConfFile, conf, taskAttemptListener, committer, 
           jobToken, credentials, clock,
           completedTasksFromPreviousRun, startCount, metrics, appContext);
+      this.taskType = taskType;
     }
 
     @Override
@@ -120,7 +121,7 @@ public class TestTaskImpl {
     protected TaskAttemptImpl createAttempt() {
       MockTaskAttemptImpl attempt = new MockTaskAttemptImpl(getID(), ++taskAttemptCounter, 
           eventHandler, taskAttemptListener, remoteJobConfFile, partition,
-          conf, committer, jobToken, credentials, clock, appContext);
+          conf, committer, jobToken, credentials, clock, appContext, taskType);
       taskAttempts.add(attempt);
       return attempt;
     }
@@ -142,18 +143,20 @@ public class TestTaskImpl {
     private float progress = 0;
     private TaskAttemptState state = TaskAttemptState.NEW;
     private TaskAttemptId attemptId;
+    private TaskType taskType;
 
     public MockTaskAttemptImpl(TaskId taskId, int id, EventHandler eventHandler,
         TaskAttemptListener taskAttemptListener, Path jobFile, int partition,
         JobConf conf, OutputCommitter committer,
         Token<JobTokenIdentifier> jobToken,
         Credentials credentials, Clock clock,
-        AppContext appContext) {
+        AppContext appContext, TaskType taskType) {
       super(taskId, id, eventHandler, taskAttemptListener, jobFile, partition, conf,
           dataLocations, committer, jobToken, credentials, clock, appContext);
       attemptId = Records.newRecord(TaskAttemptId.class);
       attemptId.setId(id);
       attemptId.setTaskId(taskId);
+      this.taskType = taskType;
     }
 
     public TaskAttemptId getAttemptId() {
@@ -162,7 +165,7 @@ public class TestTaskImpl {
     
     @Override
     protected Task createRemoteTask() {
-      return new MockTask();
+      return new MockTask(taskType);
     }    
     
     public float getProgress() {
@@ -185,6 +188,11 @@ public class TestTaskImpl {
   
   private class MockTask extends Task {
 
+    private TaskType taskType;
+    MockTask(TaskType taskType) {
+      this.taskType = taskType;
+    }
+    
     @Override
     public void run(JobConf job, TaskUmbilicalProtocol umbilical)
         throws IOException, ClassNotFoundException, InterruptedException {
@@ -193,7 +201,7 @@ public class TestTaskImpl {
 
     @Override
     public boolean isMapTask() {
-      return true;
+      return (taskType == TaskType.MAP);
     }    
     
   }
@@ -227,14 +235,15 @@ public class TestTaskImpl {
     taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
     when(taskSplitMetaInfo.getLocations()).thenReturn(dataLocations); 
     
-    taskAttempts = new ArrayList<MockTaskAttemptImpl>();
-    
-    mockTask = new MockTaskImpl(jobId, partition, dispatcher.getEventHandler(),
+    taskAttempts = new ArrayList<MockTaskAttemptImpl>();    
+  }
+  
+  private MockTaskImpl createMockTask(TaskType taskType) {
+    return new MockTaskImpl(jobId, partition, dispatcher.getEventHandler(),
         remoteJobConfFile, conf, taskAttemptListener, committer, jobToken,
         credentials, clock,
         completedTasksFromPreviousRun, startCount,
-        metrics, appContext);        
-    
+        metrics, appContext, taskType);
   }
 
   @After 
@@ -342,6 +351,7 @@ public class TestTaskImpl {
   @Test
   public void testInit() {
     LOG.info("--- START: testInit ---");
+    mockTask = createMockTask(TaskType.MAP);        
     assertTaskNewState();
     assert(taskAttempts.size() == 0);
   }
@@ -352,6 +362,7 @@ public class TestTaskImpl {
    */
   public void testScheduleTask() {
     LOG.info("--- START: testScheduleTask ---");
+    mockTask = createMockTask(TaskType.MAP);        
     TaskId taskId = getNewTaskID();
     scheduleTaskAttempt(taskId);
   }
@@ -362,6 +373,7 @@ public class TestTaskImpl {
    */
   public void testKillScheduledTask() {
     LOG.info("--- START: testKillScheduledTask ---");
+    mockTask = createMockTask(TaskType.MAP);        
     TaskId taskId = getNewTaskID();
     scheduleTaskAttempt(taskId);
     killTask(taskId);
@@ -374,6 +386,7 @@ public class TestTaskImpl {
    */
   public void testKillScheduledTaskAttempt() {
     LOG.info("--- START: testKillScheduledTaskAttempt ---");
+    mockTask = createMockTask(TaskType.MAP);        
     TaskId taskId = getNewTaskID();
     scheduleTaskAttempt(taskId);
     killScheduledTaskAttempt(getLastAttempt().getAttemptId());
@@ -386,6 +399,7 @@ public class TestTaskImpl {
    */
   public void testLaunchTaskAttempt() {
     LOG.info("--- START: testLaunchTaskAttempt ---");
+    mockTask = createMockTask(TaskType.MAP);        
     TaskId taskId = getNewTaskID();
     scheduleTaskAttempt(taskId);
     launchTaskAttempt(getLastAttempt().getAttemptId());
@@ -398,6 +412,7 @@ public class TestTaskImpl {
    */
   public void testKillRunningTaskAttempt() {
     LOG.info("--- START: testKillRunningTaskAttempt ---");
+    mockTask = createMockTask(TaskType.MAP);        
     TaskId taskId = getNewTaskID();
     scheduleTaskAttempt(taskId);
     launchTaskAttempt(getLastAttempt().getAttemptId());
@@ -407,6 +422,7 @@ public class TestTaskImpl {
   @Test 
   public void testTaskProgress() {
     LOG.info("--- START: testTaskProgress ---");
+    mockTask = createMockTask(TaskType.MAP);        
         
     // launch task
     TaskId taskId = getNewTaskID();
@@ -444,6 +460,7 @@ public class TestTaskImpl {
   
   @Test
   public void testFailureDuringTaskAttemptCommit() {
+    mockTask = createMockTask(TaskType.MAP);        
     TaskId taskId = getNewTaskID();
     scheduleTaskAttempt(taskId);
     launchTaskAttempt(getLastAttempt().getAttemptId());
@@ -469,8 +486,7 @@ public class TestTaskImpl {
     assertTaskSucceededState();
   }
   
-  @Test
-  public void testSpeculativeTaskAttemptSucceedsEvenIfFirstFails() {
+  private void runSpeculativeTaskAttemptSucceedsEvenIfFirstFails(TaskEventType failEvent) {
     TaskId taskId = getNewTaskID();
     scheduleTaskAttempt(taskId);
     launchTaskAttempt(getLastAttempt().getAttemptId());
@@ -489,11 +505,34 @@ public class TestTaskImpl {
     
     // Now fail the first task attempt, after the second has succeeded
     mockTask.handle(new TaskTAttemptEvent(taskAttempts.get(0).getAttemptId(), 
-        TaskEventType.T_ATTEMPT_FAILED));
+        failEvent));
     
     // The task should still be in the succeeded state
     assertTaskSucceededState();
-    
+  }
+  
+  @Test
+  public void testMapSpeculativeTaskAttemptSucceedsEvenIfFirstFails() {
+    mockTask = createMockTask(TaskType.MAP);        
+    runSpeculativeTaskAttemptSucceedsEvenIfFirstFails(TaskEventType.T_ATTEMPT_FAILED);
+  }
+
+  @Test
+  public void testReduceSpeculativeTaskAttemptSucceedsEvenIfFirstFails() {
+    mockTask = createMockTask(TaskType.REDUCE);        
+    runSpeculativeTaskAttemptSucceedsEvenIfFirstFails(TaskEventType.T_ATTEMPT_FAILED);
+  }
+  
+  @Test
+  public void testMapSpeculativeTaskAttemptSucceedsEvenIfFirstIsKilled() {
+    mockTask = createMockTask(TaskType.MAP);        
+    runSpeculativeTaskAttemptSucceedsEvenIfFirstFails(TaskEventType.T_ATTEMPT_KILLED);
+  }
+
+  @Test
+  public void testReduceSpeculativeTaskAttemptSucceedsEvenIfFirstIsKilled() {
+    mockTask = createMockTask(TaskType.REDUCE);        
+    runSpeculativeTaskAttemptSucceedsEvenIfFirstFails(TaskEventType.T_ATTEMPT_KILLED);
   }
 
 }

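The createMockTask(TaskType) factory replaces the fixed MAP-only field so the speculative-attempt scenarios can run as both MAP and REDUCE while every other test stays MAP-only. The obvious alternative, JUnit's Parameterized runner, would run the entire class once per type; a sketch of that trade-off (not the approach the patch takes):

    import java.util.Arrays;
    import java.util.Collection;
    import org.junit.Before;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    @RunWith(Parameterized.class)
    public class TestTaskImplParameterized {
      @Parameterized.Parameters
      public static Collection<Object[]> taskTypes() {
        return Arrays.asList(new Object[][] {{TaskType.MAP}, {TaskType.REDUCE}});
      }

      private final TaskType taskType;

      public TestTaskImplParameterized(TaskType taskType) {
        this.taskType = taskType;
      }

      @Before
      public void setup() {
        // mockTask = createMockTask(taskType);
      }
    }

Per-test factory calls keep the suite faster: only the four speculative tests pay for a second task type.
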
+ 2 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java

@@ -67,7 +67,7 @@ public class MRBuilderUtils {
       String userName, JobState state, long submitTime, long startTime, long finishTime,
       float setupProgress, float mapProgress, float reduceProgress,
       float cleanupProgress, String jobFile, List<AMInfo> amInfos,
-      boolean isUber) {
+      boolean isUber, String diagnostics) {
     JobReport report = Records.newRecord(JobReport.class);
     report.setJobId(jobId);
     report.setJobName(jobName);
@@ -83,6 +83,7 @@ public class MRBuilderUtils {
     report.setJobFile(jobFile);
     report.setAMInfos(amInfos);
     report.setIsUber(isUber);
+    report.setDiagnostics(diagnostics);
     return report;
   }
 

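With the extra parameter, callers hand the report its diagnostics at construction time and can read them straight back; a round trip using only accessors visible in this diff (the jobId, names, and message are illustrative):

    JobReport report = MRBuilderUtils.newJobReport(jobId, "wordcount", "alice",
        JobState.RUNNING, 0L, 0L, 0L, 0f, 0.5f, 0f, 0f, "jobfile", null,
        false, "fetch failures reported against one node");
    String diag = report.getDiagnostics();  // the string passed above
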
+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java

@@ -520,5 +520,10 @@ public class ConfigUtil {
         MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_CLIENT   
     });
   }
+
+  public static void main(String[] args) {
+    loadResources();
+    Configuration.dumpDeprecatedKeys();
+  }
 }
 

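The new main() turns ConfigUtil into a small diagnostic tool: loadResources() registers every old-to-new key mapping, and Configuration.dumpDeprecatedKeys() prints the resulting table (it can be run with the generic class launcher, e.g. hadoop org.apache.hadoop.mapreduce.util.ConfigUtil). The registrations themselves look roughly like the two below; these keys are illustrative examples, and the full table lives in loadResources():

    Configuration.addDeprecation("mapred.job.name",
        new String[] {MRJobConfig.JOB_NAME});
    Configuration.addDeprecation("mapred.map.tasks.speculative.execution",
        new String[] {MRJobConfig.MAP_SPECULATIVE});
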
+ 4 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java

@@ -219,7 +219,8 @@ public class TestClientServiceDelegate {
     GetJobReportResponse jobReportResponse1 = mock(GetJobReportResponse.class);
     when(jobReportResponse1.getJobReport()).thenReturn(
         MRBuilderUtils.newJobReport(jobId, "jobName-firstGen", "user",
-            JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null, false));
+            JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null,
+            false, ""));
 
     // First AM returns a report with jobName firstGen and simulates AM shutdown
     // on second invocation.
@@ -231,7 +232,8 @@ public class TestClientServiceDelegate {
     GetJobReportResponse jobReportResponse2 = mock(GetJobReportResponse.class);
     when(jobReportResponse2.getJobReport()).thenReturn(
         MRBuilderUtils.newJobReport(jobId, "jobName-secondGen", "user",
-            JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null, false));
+            JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null,
+            false, ""));
 
     // Second AM generation returns a report with jobName secondGen
     MRClientProtocol secondGenAMProxy = mock(MRClientProtocol.class);

+ 5 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java

@@ -23,6 +23,7 @@ import static org.mockito.Mockito.when;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.util.concurrent.TimeoutException;
 
 import junit.framework.TestCase;
 
@@ -95,7 +96,7 @@ public class TestFileInputFormat extends TestCase {
   }
 
   private void createInputs(FileSystem fs, Path inDir, String fileName)
-  throws IOException {
+      throws IOException, TimeoutException, InterruptedException {
     // create a multi-block file on hdfs
     Path path = new Path(inDir, fileName);
     final short replication = 2;
@@ -157,7 +158,7 @@ public class TestFileInputFormat extends TestCase {
     }
   }
 
-  public void testMultiLevelInput() throws IOException {
+  public void testMultiLevelInput() throws Exception {
     JobConf job = new JobConf(conf);
 
     job.setBoolean("dfs.replication.considerLoad", false);
@@ -291,7 +292,8 @@ public class TestFileInputFormat extends TestCase {
   }
 
   static void writeFile(Configuration conf, Path name,
-      short replication, int numBlocks) throws IOException {
+      short replication, int numBlocks)
+      throws IOException, TimeoutException, InterruptedException {
     FileSystem fileSys = FileSystem.get(conf);
 
     FSDataOutputStream stm = fileSys.create(name, true,

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultipleLevelCaching.java

@@ -71,13 +71,13 @@ public class TestMultipleLevelCaching extends TestCase {
     return rack.toString();
   }
 
-  public void testMultiLevelCaching() throws IOException {
+  public void testMultiLevelCaching() throws Exception {
     for (int i = 1 ; i <= MAX_LEVEL; ++i) {
       testCachingAtLevel(i);
     }
   }
 
-  private void testCachingAtLevel(int level) throws IOException {
+  private void testCachingAtLevel(int level) throws Exception {
     String namenode = null;
     MiniDFSCluster dfs = null;
     MiniMRCluster mr = null;

+ 21 - 12
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java

@@ -31,6 +31,7 @@ import java.util.Enumeration;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -449,11 +450,14 @@ public class UtilsForTests {
   static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys, 
                           String mapSignalFile, 
                           String reduceSignalFile, int replication) 
-  throws IOException {
-    writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile), 
-              (short)replication);
-    writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile), 
-              (short)replication);
+      throws IOException, TimeoutException {
+    try {
+      writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile), 
+                (short)replication);
+      writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile), (short)replication);
+    } catch (InterruptedException ie) {
+      // Ignore
+    }
   }
   
   /**
@@ -462,12 +466,16 @@ public class UtilsForTests {
   static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys, 
                           boolean isMap, String mapSignalFile, 
                           String reduceSignalFile)
-  throws IOException {
-    //  signal the maps to complete
-    writeFile(dfs.getNameNode(), fileSys.getConf(),
-              isMap 
-              ? new Path(mapSignalFile)
-              : new Path(reduceSignalFile), (short)1);
+      throws IOException, TimeoutException {
+    try {
+      //  signal the maps to complete
+      writeFile(dfs.getNameNode(), fileSys.getConf(),
+                isMap 
+                ? new Path(mapSignalFile)
+                : new Path(reduceSignalFile), (short)1);
+    } catch (InterruptedException ie) {
+      // Ignore
+    }
   }
   
   static String getSignalFile(Path dir) {
@@ -483,7 +491,8 @@ public class UtilsForTests {
   }
   
   static void writeFile(NameNode namenode, Configuration conf, Path name, 
-      short replication) throws IOException {
+                        short replication)
+      throws IOException, TimeoutException, InterruptedException {
     FileSystem fileSys = FileSystem.get(conf);
     SequenceFile.Writer writer = 
       SequenceFile.createWriter(fileSys, conf, name, 

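Both signalTasks() overloads now swallow InterruptedException with an empty catch so that their signatures only grow by TimeoutException. If the interrupt should stay observable to callers, the conventional alternative is to restore the thread's interrupt flag; a sketch of the first overload with that change (an editorial suggestion, not what the patch does):

    static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
                            String mapSignalFile, String reduceSignalFile,
                            int replication)
        throws IOException, TimeoutException {
      try {
        writeFile(dfs.getNameNode(), fileSys.getConf(),
                  new Path(mapSignalFile), (short) replication);
        writeFile(dfs.getNameNode(), fileSys.getConf(),
                  new Path(reduceSignalFile), (short) replication);
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();  // keep the interrupt observable
      }
    }
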
+ 10 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java

@@ -23,6 +23,7 @@ import java.net.URI;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.zip.GZIPOutputStream;
+import java.util.concurrent.TimeoutException;
 
 import junit.framework.TestCase;
 
@@ -278,7 +279,7 @@ public class TestCombineFileInputFormat extends TestCase {
     assertFalse(rr.nextKeyValue());
   }
 
-  public void testSplitPlacement() throws IOException {
+  public void testSplitPlacement() throws Exception {
     MiniDFSCluster dfs = null;
     FileSystem fileSys = null;
     try {
@@ -678,7 +679,8 @@ public class TestCombineFileInputFormat extends TestCase {
   }
 
   static void writeFile(Configuration conf, Path name,
-      short replication, int numBlocks) throws IOException {
+                        short replication, int numBlocks)
+      throws IOException, TimeoutException, InterruptedException {
     FileSystem fileSys = FileSystem.get(conf);
 
     FSDataOutputStream stm = fileSys.create(name, true,
@@ -689,7 +691,8 @@ public class TestCombineFileInputFormat extends TestCase {
 
   // Creates the gzip file and return the FileStatus
   static FileStatus writeGzipFile(Configuration conf, Path name,
-      short replication, int numBlocks) throws IOException {
+      short replication, int numBlocks)
+      throws IOException, TimeoutException, InterruptedException {
     FileSystem fileSys = FileSystem.get(conf);
 
     GZIPOutputStream out = new GZIPOutputStream(fileSys.create(name, true, conf
@@ -699,7 +702,8 @@ public class TestCombineFileInputFormat extends TestCase {
   }
 
   private static void writeDataAndSetReplication(FileSystem fileSys, Path name,
-      OutputStream out, short replication, int numBlocks) throws IOException {
+        OutputStream out, short replication, int numBlocks)
+      throws IOException, TimeoutException, InterruptedException {
     for (int i = 0; i < numBlocks; i++) {
       out.write(databuf);
     }
@@ -707,7 +711,7 @@ public class TestCombineFileInputFormat extends TestCase {
     DFSTestUtil.waitReplication(fileSys, name, replication);
   }
   
-  public void testSplitPlacementForCompressedFiles() throws IOException {
+  public void testSplitPlacementForCompressedFiles() throws Exception {
     MiniDFSCluster dfs = null;
     FileSystem fileSys = null;
     try {
@@ -1058,7 +1062,7 @@ public class TestCombineFileInputFormat extends TestCase {
   /**
    * Test that CFIF can handle missing blocks.
    */
-  public void testMissingBlocks() throws IOException {
+  public void testMissingBlocks() throws Exception {
     String namenode = null;
     MiniDFSCluster dfs = null;
     FileSystem fileSys = null;

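The throws-clause churn across these jobclient tests has a single root cause: DFSTestUtil.waitReplication(), called from writeDataAndSetReplication() above, now declares TimeoutException and InterruptedException, and those propagate up through every writeFile()-style caller. The write-then-wait pattern the new signatures accommodate, condensed (buffer and block sizes are illustrative):

    static void writeAndWaitForReplication(FileSystem fs, Path name,
        short replication, byte[] databuf, int numBlocks)
        throws IOException, InterruptedException, TimeoutException {
      FSDataOutputStream out = fs.create(name, true, 4096, replication,
          64L * 1024 * 1024);
      try {
        for (int i = 0; i < numBlocks; i++) {
          out.write(databuf);
        }
      } finally {
        out.close();
      }
      // blocks until every block of the file reaches the target replication
      DFSTestUtil.waitReplication(fs, name, replication);
    }
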
+ 12 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml

@@ -172,6 +172,18 @@
           <effort>Max</effort>
        </configuration>
      </plugin>
+     <plugin>
+       <groupId>org.apache.maven.plugins</groupId>
+       <artifactId>maven-surefire-plugin</artifactId>
+       <configuration>
+         <properties>
+           <property>
+             <name>listener</name>
+             <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+           </property>
+         </properties>
+       </configuration>
+     </plugin>
     </plugins>
   </build>
  

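The listener property is Surefire's hook for attaching JUnit run listeners, so every test run in this module now carries the TimedOutTestsListener named above; the same block is repeated for hadoop-mapreduce-project/pom.xml below. The rough shape of such a listener (a sketch, not the actual TimedOutTestsListener; the timeout-message prefix is an assumption):

    import java.util.Map;
    import org.junit.runner.notification.Failure;
    import org.junit.runner.notification.RunListener;

    public class StackDumpOnTimeoutListener extends RunListener {
      @Override
      public void testFailure(Failure failure) throws Exception {
        String msg = String.valueOf(failure.getMessage());
        if (msg.startsWith("test timed out after")) {  // assumed prefix
          // dump every live thread's stack so the hang can be diagnosed
          for (Map.Entry<Thread, StackTraceElement[]> e
              : Thread.getAllStackTraces().entrySet()) {
            System.err.println(e.getKey());
            for (StackTraceElement frame : e.getValue()) {
              System.err.println("    at " + frame);
            }
          }
        }
      }
    }
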
+ 12 - 0
hadoop-mapreduce-project/pom.xml

@@ -220,6 +220,18 @@
           </includes>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 

+ 3 - 3
hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java

@@ -52,11 +52,11 @@ public class TestHadoopArchives extends TestCase {
 
   {
     ((Log4JLogger)LogFactory.getLog(org.apache.hadoop.security.Groups.class)
-        ).getLogger().setLevel(Level.OFF);
+        ).getLogger().setLevel(Level.ERROR);
     ((Log4JLogger)org.apache.hadoop.ipc.Server.LOG
-        ).getLogger().setLevel(Level.OFF);
+        ).getLogger().setLevel(Level.ERROR);
     ((Log4JLogger)org.apache.hadoop.util.AsyncDiskService.LOG
-        ).getLogger().setLevel(Level.OFF);
+        ).getLogger().setLevel(Level.ERROR);
   }
 
   private static final String inputDir = "input";

+ 3 - 3
hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java

@@ -61,9 +61,9 @@ import org.junit.Ignore;
 public class TestCopyFiles extends TestCase {
   {
     ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange")
-        ).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
+        ).getLogger().setLevel(Level.ERROR);
+    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ERROR);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ERROR);
     ((Log4JLogger)DistCpV1.LOG).getLogger().setLevel(Level.ALL);
   }
   

Some files were not shown because too many files changed in this diff