
Merge trunk into branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3077@1387449 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon, 12 years ago
parent
commit
e9f4de5ced
100 files changed with 1789 additions and 475 deletions
  1. 41 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. 12 0
      hadoop-common-project/hadoop-common/src/CMakeLists.txt
  3. 1 1
      hadoop-common-project/hadoop-common/src/contrib/bash-tab-completion/hadoop.sh
  4. 5 3
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
  5. 0 3
      hadoop-common-project/hadoop-common/src/main/bin/slaves.sh
  6. 11 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  7. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
  8. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
  9. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java
  10. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3InputStream.java
  11. 88 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
  12. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  13. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  14. 9 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  15. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
  16. 5 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocolPB.java
  17. 35 23
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/impl/pb/client/GetUserMappingsProtocolPBClientImpl.java
  18. 20 19
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/impl/pb/service/GetUserMappingsProtocolPBServiceImpl.java
  19. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java
  20. 3 3
      hadoop-common-project/hadoop-common/src/main/proto/GetUserMappingsProtocol.proto
  21. 70 60
      hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm
  22. 193 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
  23. 23 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java
  24. 181 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java
  25. 166 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TimedOutTestsListener.java
  26. 6 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
  27. 1 1
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
  28. 0 5
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm
  29. 22 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java
  30. 33 1
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  31. 6 0
      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  32. 4 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
  33. 6 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
  34. 7 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  35. 37 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  36. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
  37. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
  38. 19 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
  39. 3 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
  40. 3 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  41. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
  42. 35 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
  43. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
  44. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
  45. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
  46. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  47. 5 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
  48. 8 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  49. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
  50. 1 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
  51. 22 16
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  52. 0 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
  53. 53 36
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  54. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
  55. 24 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
  56. 1 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp
  57. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
  58. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp
  59. 27 19
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
  60. 10 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  61. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
  62. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
  63. 2 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
  64. 16 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
  65. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
  66. 164 41
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
  67. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
  68. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
  69. 25 6
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
  70. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
  71. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
  72. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
  73. 6 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
  74. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
  75. 16 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
  76. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
  77. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
  78. 2 6
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
  79. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
  80. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
  81. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
  82. 6 0
      hadoop-mapreduce-project/CHANGES.txt
  83. 10 4
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
  84. 33 9
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
  85. 37 33
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
  86. 28 10
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
  87. 7 7
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
  88. 40 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
  89. 54 15
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
  90. 2 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java
  91. 5 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
  92. 4 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
  93. 5 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java
  94. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultipleLevelCaching.java
  95. 21 12
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java
  96. 10 6
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
  97. 12 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
  98. 12 0
      hadoop-mapreduce-project/pom.xml
  99. 3 3
      hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
  100. 3 3
      hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java

+ 41 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -202,6 +202,18 @@ Trunk (Unreleased)
    HADOOP-8684. Deadlock between WritableComparator and WritableComparable.
    (Jing Zhao via suresh)
 
+    HADOOP-8786. HttpServer continues to start even if AuthenticationFilter
+    fails to init (todd)
+
+    HADOOP-8767. Secondary namenode is started on slave nodes instead of
+    master nodes. (Giovanni Delussu via suresh)
+
+    HADOOP-8818. Use equals instead == in MD5MD5CRC32FileChecksum
+    and TFileDumper. (Brandon Li via suresh)
+
+    HADOOP-8821. Fix findbugs warning related to concatenating string in a 
+    for loop in Configuration#dumpDeprecatedKeys(). (suresh)
+
  OPTIMIZATIONS

    HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -212,12 +224,34 @@ Release 2.0.3-alpha - Unreleased
 
 
  NEW FEATURES
 
 
+    HADOOP-8597. Permit FsShell's text command to read Avro files.
+    (Ivan Vladimirov Ivanov via cutting)
+
  IMPROVEMENTS
 
 
+    HADOOP-8789. Tests setLevel(Level.OFF) should be Level.ERROR.
+    (Andy Isaacson via eli)
+
+    HADOOP-8755. Print thread dump when tests fail due to timeout. (Andrey
+    Klochkov via atm)
+
+    HADOOP-8806. libhadoop.so: dlopen should be better at locating
+    libsnappy.so, etc. (Colin Patrick McCabe via eli)
+
+    HADOOP-8812. ExitUtil#terminate should print Exception#toString. (eli)
+
+    HADOOP-8805. Move protocol buffer implementation of GetUserMappingProtocol from HDFS to Common. (bowang via tucu)
+
  OPTIMIZATIONS

  BUG FIXES
 
 
+    HADOOP-8795. BASH tab completion doesn't look in PATH, assumes path to
+    executable is specified. (Sean Mackrory via atm)
+
+    HADOOP-8780. Update DeprecatedProperties apt file. (Ahmed Radwan via
+    tomwhite)
+
Release 2.0.2-alpha - 2012-09-07

  INCOMPATIBLE CHANGES
@@ -343,6 +377,11 @@ Release 2.0.2-alpha - 2012-09-07
    HADOOP-8754. Deprecate all the RPC.getServer() variants.  (Brandon Li
    via szetszwo)
 
 
+    HADOOP-8801. ExitUtil#terminate should capture the exception stack trace. (eli)
+
+    HADOOP-8819. Incorrectly & is used instead of && in some file system 
+    implementations. (Brandon Li via suresh)
+
  BUG FIXES

    HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
@@ -497,6 +536,8 @@ Release 2.0.2-alpha - 2012-09-07
    HADOOP-8775. MR2 distcp permits non-positive value to -bandwidth option
    which causes job never to complete. (Sandy Ryza via atm)
 
 
+    HADOOP-8781. hadoop-config.sh should add JAVA_LIBRARY_PATH to LD_LIBRARY_PATH. (tucu)
+
  BREAKDOWN OF HDFS-3042 SUBTASKS

    HADOOP-8220. ZKFailoverController doesn't handle failure to become active

+ 12 - 0
hadoop-common-project/hadoop-common/src/CMakeLists.txt

@@ -109,6 +109,7 @@ add_executable(test_bulk_crc32
)
set_property(SOURCE main.cpp PROPERTY INCLUDE_DIRECTORIES "\"-Werror\" \"-Wall\"")
 
 
+SET(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
add_dual_library(hadoop
    ${D}/io/compress/lz4/Lz4Compressor.c
    ${D}/io/compress/lz4/Lz4Decompressor.c
@@ -125,6 +126,17 @@ add_dual_library(hadoop
    ${D}/util/NativeCrc32.c
    ${D}/util/bulk_crc32.c
)
+
+IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+    #
+    # By embedding '$ORIGIN' into the RPATH of libhadoop.so,
+    # dlopen will look in the directory containing libhadoop.so.
+    # However, $ORIGIN is not supported by all operating systems.
+    #
+    SET_TARGET_PROPERTIES(hadoop 
+        PROPERTIES INSTALL_RPATH "\$ORIGIN/")
+ENDIF()
+
target_link_dual_libraries(hadoop
    dl
    ${JAVA_JVM_LIBRARY}

+ 1 - 1
hadoop-common-project/hadoop-common/src/contrib/bash-tab-completion/hadoop.sh

@@ -26,7 +26,7 @@ _hadoop() {
  COMPREPLY=()
  cur=${COMP_WORDS[COMP_CWORD]}
  prev=${COMP_WORDS[COMP_CWORD-1]}
-  script=${COMP_WORDS[0]}  
+  script=`which ${COMP_WORDS[0]}`
   
   
  # Bash lets you tab complete things even if the script doesn't
  # exist (or isn't executable). Check to make sure it is, as we

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

@@ -74,6 +74,10 @@ fi
 
 
export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_PREFIX/$DEFAULT_CONF_DIR}"
 
 
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
# User can specify hostnames or a file where the hostnames are (not both)
if [[ ( "$HADOOP_SLAVES" != '' ) && ( "$HADOOP_SLAVE_NAMES" != '' ) ]] ; then
  echo \
@@ -113,9 +117,6 @@ case "`uname`" in
CYGWIN*) cygwin=true;;
esac
 
 
-if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
-  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-fi
 
 
# check if net.ipv6.bindv6only is set to 1
bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
@@ -243,6 +244,7 @@ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
  HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+  export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$JAVA_LIBRARY_PATH
fi
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.policy.file=$HADOOP_POLICYFILE"
 
 

+ 0 - 3
hadoop-common-project/hadoop-common/src/main/bin/slaves.sh

@@ -42,9 +42,6 @@ DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
 
 
-if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
-  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-fi
 
 
# Where to start the script, see hadoop-config.sh
# (it set up the variables based on command line options)

+ 11 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -2332,7 +2332,17 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   
   
  /**
   * A unique class which is used as a sentinel value in the caching
-   * for getClassByName. {@see Configuration#getClassByNameOrNull(String)}
+   * for getClassByName. {@link Configuration#getClassByNameOrNull(String)}
   */
  private static abstract class NegativeCacheSentinel {}
+
+  public static void dumpDeprecatedKeys() {
+    for (Map.Entry<String, DeprecatedKeyInfo> entry : deprecatedKeyMap.entrySet()) {
+      StringBuilder newKeys = new StringBuilder();
+      for (String newKey : entry.getValue().newKeys) {
+        newKeys.append(newKey).append("\t");
+      }
+      System.out.println(entry.getKey() + "\t" + newKeys.toString());
+    }
+  }
}

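For context on HADOOP-8821 (the dumpDeprecatedKeys() hunk above): findbugs flags string concatenation inside a loop because every += re-copies the accumulated string, which is why the new method appends to a single StringBuilder instead. A minimal standalone sketch of the two patterns, with made-up key names (not part of the patch):

  import java.util.Arrays;
  import java.util.List;

  public class ConcatInLoopDemo {
    public static void main(String[] args) {
      List<String> newKeys = Arrays.asList("dfs.blocksize", "dfs.nameservices");

      // Pattern findbugs warns about: each += allocates a new String
      // holding everything accumulated so far.
      String slow = "";
      for (String key : newKeys) {
        slow += key + "\t";
      }

      // Pattern used by dumpDeprecatedKeys(): append in place, convert once.
      StringBuilder fast = new StringBuilder();
      for (String key : newKeys) {
        fast.append(key).append("\t");
      }

      System.out.println(slow);
      System.out.println(fast.toString());
    }
  }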
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java

@@ -133,7 +133,7 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
 
 
    try {
      // old versions don't support crcType.
-      if (crcType == null || crcType == "") {
+      if (crcType == null || crcType.equals("")) {
        finalCrcType = DataChecksum.Type.CRC32;
      } else {
        finalCrcType = DataChecksum.Type.valueOf(crcType);

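For context on HADOOP-8818 (the hunk above): == on two String references compares object identity, not contents, so an empty string built at runtime is not == to the "" literal even though the two are equal. A small standalone sketch, not taken from the patch:

  public class StringEqualsDemo {
    public static void main(String[] args) {
      // Built at runtime, so it is a different object from the "" literal.
      String crcType = new StringBuilder().toString();

      System.out.println(crcType == "");       // false: different references
      System.out.println(crcType.equals(""));  // true: same (empty) contents
      System.out.println(crcType.isEmpty());   // equivalent emptiness check
    }
  }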
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java

@@ -488,7 +488,7 @@ public class FTPFileSystem extends FileSystem {
      if (created) {
        String parentDir = parent.toUri().getPath();
        client.changeWorkingDirectory(parentDir);
-        created = created & client.makeDirectory(pathName);
+        created = created && client.makeDirectory(pathName);
      }
    } else if (isFile(client, absolute)) {
      throw new IOException(String.format(

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java

@@ -77,7 +77,7 @@ public class FTPInputStream extends FSInputStream {
    if (byteRead >= 0) {
      pos++;
    }
-    if (stats != null & byteRead >= 0) {
+    if (stats != null && byteRead >= 0) {
      stats.incrementBytesRead(1);
    }
    return byteRead;
@@ -93,7 +93,7 @@ public class FTPInputStream extends FSInputStream {
    if (result > 0) {
      pos += result;
    }
-    if (stats != null & result > 0) {
+    if (stats != null && result > 0) {
      stats.incrementBytesRead(result);
    }
 
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3InputStream.java

@@ -113,7 +113,7 @@ class S3InputStream extends FSInputStream {
        pos++;
      }
    }
-    if (stats != null & result >= 0) {
+    if (stats != null && result >= 0) {
      stats.incrementBytesRead(1);
    }
    return result;

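For context on HADOOP-8819 (the FTPInputStream, S3InputStream and viewfs hunks above and below): a non-short-circuit & always evaluates both operands, so a left-hand null check does not stop the right-hand side from running, while && stops at the first false operand. A small standalone sketch, not taken from the patch:

  public class ShortCircuitDemo {
    static boolean rightSide() {
      System.out.println("right-hand operand evaluated");
      return true;
    }

    public static void main(String[] args) {
      Object stats = null;

      // & evaluates rightSide() even though the guard is false.
      boolean a = (stats != null) & rightSide();

      // && skips rightSide() when the guard is false, which is what a
      // guard like "stats != null" is meant to guarantee.
      boolean b = (stats != null) && rightSide();

      System.out.println(a + " " + b);  // false false
    }
  }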
+ 88 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java

@@ -17,11 +17,21 @@
 */
package org.apache.hadoop.fs.shell;
 
 
-import java.io.IOException;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
import java.io.InputStream;
+import java.io.IOException;
import java.util.LinkedList;
import java.util.zip.GZIPInputStream;
 
 
+import org.apache.avro.file.DataFileReader;
+import org.apache.avro.file.FileReader;
+import org.apache.avro.generic.GenericDatumReader;
+import org.apache.avro.generic.GenericDatumWriter;
+import org.apache.avro.io.DatumWriter;
+import org.apache.avro.io.EncoderFactory;
+import org.apache.avro.io.JsonEncoder;
+import org.apache.avro.Schema;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -37,6 +47,10 @@ import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.util.ReflectionUtils;
+import org.codehaus.jackson.JsonEncoding;
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.util.MinimalPrettyPrinter;
 
 
/**
 * Display contents of files 
@@ -95,14 +109,14 @@ class Display extends FsCommand {
   
   
  /**
   * Same behavior as "-cat", but handles zip and TextRecordInputStream
-   * encodings. 
+   * and Avro encodings. 
   */ 
  public static class Text extends Cat {
    public static final String NAME = "text";
    public static final String USAGE = Cat.USAGE;
    public static final String DESCRIPTION =
      "Takes a source file and outputs the file in text format.\n" +
-      "The allowed formats are zip and TextRecordInputStream.";
+      "The allowed formats are zip and TextRecordInputStream and Avro.";
     
     
    @Override
    protected InputStream getInputStream(PathData item) throws IOException {
@@ -132,6 +146,13 @@ class Display extends FsCommand {
          }
          break;
        }
+        case 0x4f62: { // 'O' 'b'
+          if (i.readByte() == 'j') {
+            i.close();
+            return new AvroFileInputStream(item.stat);
+          }
+          break;
+        }
      }

      // File is non-compressed, or not a file container we know.
@@ -187,4 +208,68 @@ class Display extends FsCommand {
      super.close();
    }
  }
+
+  /**
+   * This class transforms a binary Avro data file into an InputStream
+   * with data that is in a human readable JSON format.
+   */
+  protected static class AvroFileInputStream extends InputStream {
+    private int pos;
+    private byte[] buffer;
+    private ByteArrayOutputStream output;
+    private FileReader fileReader;
+    private DatumWriter<Object> writer;
+    private JsonEncoder encoder;
+
+    public AvroFileInputStream(FileStatus status) throws IOException {
+      pos = 0;
+      buffer = new byte[0];
+      GenericDatumReader<Object> reader = new GenericDatumReader<Object>();
+      fileReader =
+        DataFileReader.openReader(new File(status.getPath().toUri()), reader);
+      Schema schema = fileReader.getSchema();
+      writer = new GenericDatumWriter<Object>(schema);
+      output = new ByteArrayOutputStream();
+      JsonGenerator generator =
+        new JsonFactory().createJsonGenerator(output, JsonEncoding.UTF8);
+      MinimalPrettyPrinter prettyPrinter = new MinimalPrettyPrinter();
+      prettyPrinter.setRootValueSeparator(System.getProperty("line.separator"));
+      generator.setPrettyPrinter(prettyPrinter);
+      encoder = EncoderFactory.get().jsonEncoder(schema, generator);
+    }
+
+    /**
+     * Read a single byte from the stream.
+     */
+    @Override
+    public int read() throws IOException {
+      if (pos < buffer.length) {
+        return buffer[pos++];
+      }
+      if (!fileReader.hasNext()) {
+        return -1;
+      }
+      writer.write(fileReader.next(), encoder);
+      encoder.flush();
+      if (!fileReader.hasNext()) {
+        // Write a new line after the last Avro record.
+        output.write(System.getProperty("line.separator").getBytes());
+        output.flush();
+      }
+      pos = 0;
+      buffer = output.toByteArray();
+      output.reset();
+      return read();
+    }
+
+    /**
+      * Close the stream.
+      */
+    @Override
+    public void close() throws IOException {
+      fileReader.close();
+      output.close();
+      super.close();
+    }
+  }
}

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -707,7 +707,7 @@ public class ViewFileSystem extends FileSystem {
    @Override
    public boolean mkdirs(Path dir, FsPermission permission)
        throws AccessControlException, FileAlreadyExistsException {
-      if (theInternalDir.isRoot & dir == null) {
+      if (theInternalDir.isRoot && dir == null) {
        throw new FileAlreadyExistsException("/ already exits");
      }
      // Note dir starts with /

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -750,7 +750,7 @@ public class ViewFs extends AbstractFileSystem {
    public void mkdir(final Path dir, final FsPermission permission,
        final boolean createParent) throws AccessControlException,
        FileAlreadyExistsException {
-      if (theInternalDir.isRoot & dir == null) {
+      if (theInternalDir.isRoot && dir == null) {
        throw new FileAlreadyExistsException("/ already exits");
      }
      throw readOnlyMountTable("mkdir", dir);

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -677,6 +677,15 @@ public class HttpServer implements FilterContainer {
              "Problem in starting http server. Server handlers failed");
        }
      }
+      // Make sure there are no errors initializing the context.
+      Throwable unavailableException = webAppContext.getUnavailableException();
+      if (unavailableException != null) {
+        // Have to stop the webserver, or else its non-daemon threads
+        // will hang forever.
+        webServer.stop();
+        throw new IOException("Unable to initialize WebAppContext",
+            unavailableException);
+      }
    } catch (IOException e) {
      throw e;
    } catch (InterruptedException e) {

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java

@@ -125,7 +125,7 @@ class TFileDumper {
          dataSizeUncompressed += region.getRawSize();
        }
        properties.put("Data Block Bytes", Long.toString(dataSize));
-        if (reader.readerBCF.getDefaultCompressionName() != "none") {
+        if (!reader.readerBCF.getDefaultCompressionName().equals("none")) {
          properties.put("Data Block Uncompressed Bytes", Long
              .toString(dataSizeUncompressed));
          properties.put("Data Block Compression Ratio", String.format(

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocolPB.java

@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
- *     http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,21 +16,21 @@
 * limitations under the License.
 */
 
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.tools;
 
 
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocol.GetUserMappingsProtocolService;
 
 
@KerberosInfo(
    serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@ProtocolInfo(
-    protocolName = "org.apache.hadoop.tools.GetUserMappingsProtocol", 
+    protocolName = "org.apache.hadoop.tools.GetUserMappingsProtocol",
    protocolVersion = 1)
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "YARN"})
@InterfaceStability.Evolving
public interface GetUserMappingsProtocolPB extends
  GetUserMappingsProtocolService.BlockingInterface {

+ 35 - 23
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/impl/pb/client/GetUserMappingsProtocolPBClientImpl.java

@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
- *     http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,54 +16,66 @@
 * limitations under the License.
 */
 
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.tools.impl.pb.client;
 
 
import java.io.Closeable;
import java.io.IOException;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.tools.GetUserMappingsProtocolPB;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocol.GetGroupsForUserRequestProto;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocol.GetGroupsForUserResponseProto;
 
 
-import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
 
 
-public class GetUserMappingsProtocolClientSideTranslatorPB implements
+public class GetUserMappingsProtocolPBClientImpl implements
    ProtocolMetaInterface, GetUserMappingsProtocol, Closeable {
 
 
-  /** RpcController is not used and hence is set to null */
-  private final static RpcController NULL_CONTROLLER = null;
-  private final GetUserMappingsProtocolPB rpcProxy;
+  private GetUserMappingsProtocolPB proxy;
   
   
-  public GetUserMappingsProtocolClientSideTranslatorPB(
-      GetUserMappingsProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
+  public GetUserMappingsProtocolPBClientImpl(
+      long clientVersion, InetSocketAddress addr, Configuration conf)
+      throws IOException {
+    RPC.setProtocolEngine(conf, GetUserMappingsProtocolPB.class,
+        ProtobufRpcEngine.class);
+    proxy = (GetUserMappingsProtocolPB) RPC.getProxy(
+        GetUserMappingsProtocolPB.class, clientVersion, addr, conf);
  }
-
+  
+  public GetUserMappingsProtocolPBClientImpl(
+      GetUserMappingsProtocolPB proxy) {
+    this.proxy = proxy;
+  }
+  
  @Override
  public void close() throws IOException {
-    RPC.stopProxy(rpcProxy);
+    RPC.stopProxy(proxy);
  }
-
+  
  @Override
  public String[] getGroupsForUser(String user) throws IOException {
-    GetGroupsForUserRequestProto request = GetGroupsForUserRequestProto
-        .newBuilder().setUser(user).build();
-    GetGroupsForUserResponseProto resp;
+    GetGroupsForUserRequestProto requestProto = 
+        GetGroupsForUserRequestProto.newBuilder().setUser(user).build();
    try {
-      resp = rpcProxy.getGroupsForUser(NULL_CONTROLLER, request);
-    } catch (ServiceException se) {
-      throw ProtobufHelper.getRemoteException(se);
+      GetGroupsForUserResponseProto responseProto =
+          proxy.getGroupsForUser(null, requestProto);
+      return (String[]) responseProto.getGroupsList().toArray(
+          new String[responseProto.getGroupsCount()]);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
    }
-    return resp.getGroupsList().toArray(new String[resp.getGroupsCount()]);
  }
 
 
  @Override
  public boolean isMethodSupported(String methodName) throws IOException {
-    return RpcClientUtil.isMethodSupported(rpcProxy,
+    return RpcClientUtil.isMethodSupported(proxy,
        GetUserMappingsProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
        RPC.getProtocolVersion(GetUserMappingsProtocolPB.class), methodName);
  }

+ 20 - 19
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolServerSideTranslatorPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/impl/pb/service/GetUserMappingsProtocolPBServiceImpl.java

@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
- *     http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,42 +16,43 @@
 * limitations under the License.
 */
 
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.tools.impl.pb.service;
 
 
import java.io.IOException;
 
 
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.tools.GetUserMappingsProtocolPB;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocol.GetGroupsForUserRequestProto;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocol.GetGroupsForUserResponseProto;
 
 
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
 
 
-public class GetUserMappingsProtocolServerSideTranslatorPB implements
+public class GetUserMappingsProtocolPBServiceImpl implements
    GetUserMappingsProtocolPB {
 
 
-  private final GetUserMappingsProtocol impl;
-
-  public GetUserMappingsProtocolServerSideTranslatorPB(
-      GetUserMappingsProtocol impl) {
-    this.impl = impl;
+  private GetUserMappingsProtocol real;
+  
+  public GetUserMappingsProtocolPBServiceImpl(GetUserMappingsProtocol impl) {
+    this.real = impl;
  }
-
+  
  @Override
  public GetGroupsForUserResponseProto getGroupsForUser(
      RpcController controller, GetGroupsForUserRequestProto request)
      throws ServiceException {
-    String[] groups;
+    String user = request.getUser();
    try {
-      groups = impl.getGroupsForUser(request.getUser());
+      String[] groups = real.getGroupsForUser(user);
+      GetGroupsForUserResponseProto.Builder responseBuilder =
+          GetGroupsForUserResponseProto.newBuilder();
+      for (String group : groups) {
+        responseBuilder.addGroups(group);
+      }
+      return responseBuilder.build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    GetGroupsForUserResponseProto.Builder builder = GetGroupsForUserResponseProto
-        .newBuilder();
-    for (String g : groups) {
-      builder.addGroups(g);
-    }
-    return builder.build();
  }
+
}

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java

@@ -101,7 +101,7 @@ public final class ExitUtil {
   * @throws ExitException if System.exit is disabled for test purposes
   */
  public static void terminate(int status, Throwable t) throws ExitException {
-    terminate(status, t.getMessage());
+    terminate(status, StringUtils.stringifyException(t));
  }

  /**

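For context on HADOOP-8812 (the ExitUtil hunk above): Throwable#getMessage() returns only the message text, while Hadoop's StringUtils.stringifyException renders the full stack trace, so terminate(status, t) now records where the failure came from. A standalone sketch using an equivalent stand-in for stringifyException (assumption: it prints the throwable into a StringWriter), not taken from the patch:

  import java.io.PrintWriter;
  import java.io.StringWriter;

  public class StringifyExceptionDemo {
    // Stand-in with the same effect as StringUtils.stringifyException(t).
    static String stringifyException(Throwable t) {
      StringWriter sw = new StringWriter();
      t.printStackTrace(new PrintWriter(sw, true));
      return sw.toString();
    }

    public static void main(String[] args) {
      Throwable t = new IllegalStateException("disk full");
      System.out.println(t.getMessage());         // just "disk full"
      System.out.println(stringifyException(t));  // message plus stack trace
    }
  }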
+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/GetUserMappingsProtocol.proto → hadoop-common-project/hadoop-common/src/main/proto/GetUserMappingsProtocol.proto

@@ -15,9 +15,9 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
-option java_outer_classname = "GetUserMappingsProtocolProtos";
+ 
+option java_package = "org.apache.hadoop.tools.proto";
+option java_outer_classname = "GetUserMappingsProtocol";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
 
 

+ 70 - 60
hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm

@@ -24,8 +24,6 @@ Deprecated Properties
 *-------------------------------+-----------------------+
 *-------------------------------+-----------------------+
 || <<Deprecated property name>> || <<New property name>>|
 || <<Deprecated property name>> || <<New property name>>|
 *-------------------------------+-----------------------+
 *-------------------------------+-----------------------+
-|StorageId | dfs.datanode.StorageId
-*---+---+
 |create.empty.dir.if.nonexist | mapreduce.jobcontrol.createdir.ifnotexist
 |create.empty.dir.if.nonexist | mapreduce.jobcontrol.createdir.ifnotexist
 *---+---+
 *---+---+
 |dfs.access.time.precision | dfs.namenode.accesstime.precision
 |dfs.access.time.precision | dfs.namenode.accesstime.precision
@@ -38,14 +36,16 @@ Deprecated Properties
 *---+---+
 *---+---+
 |dfs.block.size | dfs.blocksize
 |dfs.block.size | dfs.blocksize
 *---+---+
 *---+---+
-|dfs.client.buffer.dir | fs.client.buffer.dir
-*---+---+
 |dfs.data.dir | dfs.datanode.data.dir
 |dfs.data.dir | dfs.datanode.data.dir
 *---+---+
 *---+---+
 |dfs.datanode.max.xcievers | dfs.datanode.max.transfer.threads
 |dfs.datanode.max.xcievers | dfs.datanode.max.transfer.threads
 *---+---+
 *---+---+
 |dfs.df.interval | fs.df.interval
 |dfs.df.interval | fs.df.interval
 *---+---+
 *---+---+
+|dfs.federation.nameservice.id | dfs.nameservice.id
+*---+---+
+|dfs.federation.nameservices | dfs.nameservices
+*---+---+
 |dfs.http.address | dfs.namenode.http-address
 |dfs.http.address | dfs.namenode.http-address
 *---+---+
 *---+---+
 |dfs.https.address | dfs.namenode.https-address
 |dfs.https.address | dfs.namenode.https-address
@@ -54,10 +54,10 @@ Deprecated Properties
 *---+---+
 *---+---+
 |dfs.https.need.client.auth | dfs.client.https.need-auth
 |dfs.https.need.client.auth | dfs.client.https.need-auth
 *---+---+
 *---+---+
-|dfs.max-repl-streams | dfs.namenode.replication.max-streams
-*---+---+
 |dfs.max.objects | dfs.namenode.max.objects
 |dfs.max.objects | dfs.namenode.max.objects
 *---+---+
 *---+---+
+|dfs.max-repl-streams | dfs.namenode.replication.max-streams
+*---+---+
 |dfs.name.dir | dfs.namenode.name.dir
 |dfs.name.dir | dfs.namenode.name.dir
 *---+---+
 *---+---+
 |dfs.name.dir.restore | dfs.namenode.name.dir.restore
 |dfs.name.dir.restore | dfs.namenode.name.dir.restore
@@ -86,6 +86,8 @@ Deprecated Properties
 *---+---+
 *---+---+
 |dfs.socket.timeout | dfs.client.socket-timeout
 |dfs.socket.timeout | dfs.client.socket-timeout
 *---+---+
 *---+---+
+|dfs.umaskmode | fs.permissions.umask-mode
+*---+---+
 |dfs.write.packet.size | dfs.client-write-packet-size
 |dfs.write.packet.size | dfs.client-write-packet-size
 *---+---+
 *---+---+
 |fs.checkpoint.dir | dfs.namenode.checkpoint.dir
 |fs.checkpoint.dir | dfs.namenode.checkpoint.dir
@@ -106,10 +108,10 @@ Deprecated Properties
 *---+---+
 *---+---+
 |hadoop.pipes.command-file.keep | mapreduce.pipes.commandfile.preserve
 |hadoop.pipes.command-file.keep | mapreduce.pipes.commandfile.preserve
 *---+---+
 *---+---+
-|hadoop.pipes.executable | mapreduce.pipes.executable
-*---+---+
 |hadoop.pipes.executable.interpretor | mapreduce.pipes.executable.interpretor
 |hadoop.pipes.executable.interpretor | mapreduce.pipes.executable.interpretor
 *---+---+
 *---+---+
+|hadoop.pipes.executable | mapreduce.pipes.executable
+*---+---+
 |hadoop.pipes.java.mapper | mapreduce.pipes.isjavamapper
 |hadoop.pipes.java.mapper | mapreduce.pipes.isjavamapper
 *---+---+
 *---+---+
 |hadoop.pipes.java.recordreader | mapreduce.pipes.isjavarecordreader
 |hadoop.pipes.java.recordreader | mapreduce.pipes.isjavarecordreader
@@ -130,6 +132,12 @@ Deprecated Properties
 *---+---+
 *---+---+
 |io.sort.spill.percent | mapreduce.map.sort.spill.percent
 |io.sort.spill.percent | mapreduce.map.sort.spill.percent
 *---+---+
 *---+---+
+|jobclient.completion.poll.interval | mapreduce.client.completion.pollinterval
+*---+---+
+|jobclient.output.filter | mapreduce.client.output.filter
+*---+---+
+|jobclient.progress.monitor.poll.interval | mapreduce.client.progressmonitor.pollinterval
+*---+---+
 |job.end.notification.url | mapreduce.job.end-notification.url
 |job.end.notification.url | mapreduce.job.end-notification.url
 *---+---+
 *---+---+
 |job.end.retry.attempts | mapreduce.job.end-notification.retry.attempts
 |job.end.retry.attempts | mapreduce.job.end-notification.retry.attempts
@@ -138,12 +146,6 @@ Deprecated Properties
 *---+---+
 *---+---+
 |job.local.dir | mapreduce.job.local.dir
 |job.local.dir | mapreduce.job.local.dir
 *---+---+
 *---+---+
-|jobclient.completion.poll.interval | mapreduce.client.completion.pollinterval
-*---+---+
-|jobclient.output.filter | mapreduce.client.output.filter
-*---+---+
-|jobclient.progress.monitor.poll.interval | mapreduce.client.progressmonitor.pollinterval
-*---+---+
 |keep.failed.task.files | mapreduce.task.files.preserve.failedtasks
 |keep.failed.task.files | mapreduce.task.files.preserve.failedtasks
 *---+---+
 *---+---+
 |keep.task.files.pattern | mapreduce.task.files.preserve.filepattern
 |keep.task.files.pattern | mapreduce.task.files.preserve.filepattern
@@ -196,10 +198,6 @@ Deprecated Properties
 *---+---+
 *---+---+
 |mapred.compress.map.output | mapreduce.map.output.compress
 |mapred.compress.map.output | mapreduce.map.output.compress
 *---+---+
 *---+---+
-|mapred.create.symlink | NONE - symlinking is always on
-*---+---+
-|mapreduce.job.cache.symlink.create | NONE - symlinking is always on
-*---+---+
 |mapred.data.field.separator | mapreduce.fieldsel.data.field.separator
 |mapred.data.field.separator | mapreduce.fieldsel.data.field.separator
 *---+---+
 *---+---+
 |mapred.debug.out.lines | mapreduce.task.debugout.lines
 |mapred.debug.out.lines | mapreduce.task.debugout.lines
@@ -214,18 +212,18 @@ Deprecated Properties
 *---+---+
 *---+---+
 |mapred.heartbeats.in.second | mapreduce.jobtracker.heartbeats.in.second
 |mapred.heartbeats.in.second | mapreduce.jobtracker.heartbeats.in.second
 *---+---+
 *---+---+
-|mapred.hosts | mapreduce.jobtracker.hosts.filename
-*---+---+
 |mapred.hosts.exclude | mapreduce.jobtracker.hosts.exclude.filename
 |mapred.hosts.exclude | mapreduce.jobtracker.hosts.exclude.filename
 *---+---+
 *---+---+
-|mapred.inmem.merge.threshold | mapreduce.reduce.merge.inmem.threshold
+|mapred.hosts | mapreduce.jobtracker.hosts.filename
 *---+---+
 *---+---+
-|mapred.input.dir | mapreduce.input.fileinputformat.inputdir
+|mapred.inmem.merge.threshold | mapreduce.reduce.merge.inmem.threshold
 *---+---+
 *---+---+
 |mapred.input.dir.formats | mapreduce.input.multipleinputs.dir.formats
 |mapred.input.dir.formats | mapreduce.input.multipleinputs.dir.formats
 *---+---+
 *---+---+
 |mapred.input.dir.mappers | mapreduce.input.multipleinputs.dir.mappers
 |mapred.input.dir.mappers | mapreduce.input.multipleinputs.dir.mappers
 *---+---+
 *---+---+
+|mapred.input.dir | mapreduce.input.fileinputformat.inputdir
+*---+---+
 |mapred.input.pathFilter.class | mapreduce.input.pathFilter.class
 |mapred.input.pathFilter.class | mapreduce.input.pathFilter.class
 *---+---+
 *---+---+
 |mapred.jar | mapreduce.job.jar
 |mapred.jar | mapreduce.job.jar
@@ -236,6 +234,8 @@ Deprecated Properties
 *---+---+
 *---+---+
 |mapred.job.id | mapreduce.job.id
 |mapred.job.id | mapreduce.job.id
 *---+---+
 *---+---+
+|mapred.jobinit.threads | mapreduce.jobtracker.jobinit.threads
+*---+---+
 |mapred.job.map.memory.mb | mapreduce.map.memory.mb
 |mapred.job.map.memory.mb | mapreduce.map.memory.mb
 *---+---+
 *---+---+
 |mapred.job.name | mapreduce.job.name
 |mapred.job.name | mapreduce.job.name
@@ -258,42 +258,40 @@ Deprecated Properties
 *---+---+
 *---+---+
 |mapred.job.shuffle.merge.percent | mapreduce.reduce.shuffle.merge.percent
 |mapred.job.shuffle.merge.percent | mapreduce.reduce.shuffle.merge.percent
 *---+---+
 *---+---+
-|mapred.job.tracker | mapreduce.jobtracker.address
-*---+---+
 |mapred.job.tracker.handler.count | mapreduce.jobtracker.handler.count
 |mapred.job.tracker.handler.count | mapreduce.jobtracker.handler.count
 *---+---+
 *---+---+
 |mapred.job.tracker.history.completed.location | mapreduce.jobtracker.jobhistory.completed.location
 |mapred.job.tracker.history.completed.location | mapreduce.jobtracker.jobhistory.completed.location
 *---+---+
 *---+---+
 |mapred.job.tracker.http.address | mapreduce.jobtracker.http.address
 |mapred.job.tracker.http.address | mapreduce.jobtracker.http.address
 *---+---+
 *---+---+
+|mapred.jobtracker.instrumentation | mapreduce.jobtracker.instrumentation
+*---+---+
+|mapred.jobtracker.job.history.block.size | mapreduce.jobtracker.jobhistory.block.size
+*---+---+
 |mapred.job.tracker.jobhistory.lru.cache.size | mapreduce.jobtracker.jobhistory.lru.cache.size
 |mapred.job.tracker.jobhistory.lru.cache.size | mapreduce.jobtracker.jobhistory.lru.cache.size
 *---+---+
 *---+---+
+|mapred.job.tracker | mapreduce.jobtracker.address
+*---+---+
+|mapred.jobtracker.maxtasks.per.job | mapreduce.jobtracker.maxtasks.perjob
+*---+---+
 |mapred.job.tracker.persist.jobstatus.active | mapreduce.jobtracker.persist.jobstatus.active
 |mapred.job.tracker.persist.jobstatus.active | mapreduce.jobtracker.persist.jobstatus.active
 *---+---+
 *---+---+
 |mapred.job.tracker.persist.jobstatus.dir | mapreduce.jobtracker.persist.jobstatus.dir
 |mapred.job.tracker.persist.jobstatus.dir | mapreduce.jobtracker.persist.jobstatus.dir
 *---+---+
 *---+---+
 |mapred.job.tracker.persist.jobstatus.hours | mapreduce.jobtracker.persist.jobstatus.hours
 |mapred.job.tracker.persist.jobstatus.hours | mapreduce.jobtracker.persist.jobstatus.hours
 *---+---+
 *---+---+
-|mapred.job.tracker.retire.jobs | mapreduce.jobtracker.retirejobs
+|mapred.jobtracker.restart.recover | mapreduce.jobtracker.restart.recover
 *---+---+
 *---+---+
 |mapred.job.tracker.retiredjobs.cache.size | mapreduce.jobtracker.retiredjobs.cache.size
 |mapred.job.tracker.retiredjobs.cache.size | mapreduce.jobtracker.retiredjobs.cache.size
 *---+---+
 *---+---+
-|mapred.jobinit.threads | mapreduce.jobtracker.jobinit.threads
-*---+---+
-|mapred.jobtracker.instrumentation | mapreduce.jobtracker.instrumentation
-*---+---+
-|mapred.jobtracker.job.history.block.size | mapreduce.jobtracker.jobhistory.block.size
-*---+---+
-|mapred.jobtracker.maxtasks.per.job | mapreduce.jobtracker.maxtasks.perjob
+|mapred.job.tracker.retire.jobs | mapreduce.jobtracker.retirejobs
 *---+---+
 *---+---+
-|mapred.jobtracker.restart.recover | mapreduce.jobtracker.restart.recover
+|mapred.jobtracker.taskalloc.capacitypad | mapreduce.jobtracker.taskscheduler.taskalloc.capacitypad
 *---+---+
 *---+---+
 |mapred.jobtracker.taskScheduler | mapreduce.jobtracker.taskscheduler
 |mapred.jobtracker.taskScheduler | mapreduce.jobtracker.taskscheduler
 *---+---+
 *---+---+
 |mapred.jobtracker.taskScheduler.maxRunningTasksPerJob | mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob
 |mapred.jobtracker.taskScheduler.maxRunningTasksPerJob | mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob
 *---+---+
-|mapred.jobtracker.taskalloc.capacitypad | mapreduce.jobtracker.taskscheduler.taskalloc.capacitypad
-*---+---+
 |mapred.join.expr | mapreduce.join.expr
 *---+---+
 |mapred.join.keycomparator | mapreduce.join.keycomparator
@@ -320,19 +318,19 @@ Deprecated Properties
 *---+---+
 |mapred.map.output.compression.codec | mapreduce.map.output.compress.codec
 *---+---+
-|mapred.map.task.debug.script | mapreduce.map.debug.script
-*---+---+
-|mapred.map.tasks | mapreduce.job.maps
-*---+---+
-|mapred.map.tasks.speculative.execution | mapreduce.map.speculative
-*---+---+
 |mapred.mapoutput.key.class | mapreduce.map.output.key.class
 *---+---+
 |mapred.mapoutput.value.class | mapreduce.map.output.value.class
 *---+---+
+|mapred.mapper.regex.group | mapreduce.mapper.regexmapper..group
+*---+---+
 |mapred.mapper.regex | mapreduce.mapper.regex
 *---+---+
-|mapred.mapper.regex.group | mapreduce.mapper.regexmapper..group
+|mapred.map.task.debug.script | mapreduce.map.debug.script
+*---+---+
+|mapred.map.tasks | mapreduce.job.maps
+*---+---+
+|mapred.map.tasks.speculative.execution | mapreduce.map.speculative
 *---+---+
 |mapred.max.map.failures.percent | mapreduce.map.failures.maxpercent
 *---+---+
@@ -352,12 +350,12 @@ Deprecated Properties
 *---+---+
 |mapred.min.split.size.per.rack | mapreduce.input.fileinputformat.split.minsize.per.rack
 *---+---+
-|mapred.output.compress | mapreduce.output.fileoutputformat.compress
-*---+---+
 |mapred.output.compression.codec | mapreduce.output.fileoutputformat.compress.codec
 *---+---+
 |mapred.output.compression.type | mapreduce.output.fileoutputformat.compress.type
 *---+---+
+|mapred.output.compress | mapreduce.output.fileoutputformat.compress
+*---+---+
 |mapred.output.dir | mapreduce.output.fileoutputformat.outputdir
 *---+---+
 |mapred.output.key.class | mapreduce.job.output.key.class
@@ -440,12 +438,6 @@ Deprecated Properties
 *---+---+
 |mapred.task.timeout | mapreduce.task.timeout
 *---+---+
-|mapred.task.tracker.http.address | mapreduce.tasktracker.http.address
-*---+---+
-|mapred.task.tracker.report.address | mapreduce.tasktracker.report.address
-*---+---+
-|mapred.task.tracker.task-controller | mapreduce.tasktracker.taskcontroller
-*---+---+
 |mapred.tasktracker.dns.interface | mapreduce.tasktracker.dns.interface
 *---+---+
 |mapred.tasktracker.dns.nameserver | mapreduce.tasktracker.dns.nameserver
@@ -454,6 +446,8 @@ Deprecated Properties
 *---+---+
 |mapred.tasktracker.expiry.interval | mapreduce.jobtracker.expire.trackers.interval
 *---+---+
+|mapred.task.tracker.http.address | mapreduce.tasktracker.http.address
+*---+---+
 |mapred.tasktracker.indexcache.mb | mapreduce.tasktracker.indexcache.mb
 *---+---+
 |mapred.tasktracker.instrumentation | mapreduce.tasktracker.instrumentation
@@ -466,6 +460,10 @@ Deprecated Properties
 *---+---+
 |mapred.tasktracker.reduce.tasks.maximum | mapreduce.tasktracker.reduce.tasks.maximum
 *---+---+
+|mapred.task.tracker.report.address | mapreduce.tasktracker.report.address
+*---+---+
+|mapred.task.tracker.task-controller | mapreduce.tasktracker.taskcontroller
+*---+---+
 |mapred.tasktracker.taskmemorymanager.monitoring-interval | mapreduce.tasktracker.taskmemorymanager.monitoringinterval
 *---+---+
 |mapred.tasktracker.tasks.sleeptime-before-sigkill | mapreduce.tasktracker.tasks.sleeptimebeforesigkill
@@ -480,20 +478,12 @@ Deprecated Properties
 *---+---+
 |mapred.tip.id | mapreduce.task.id
 *---+---+
-|mapred.used.genericoptionsparser | mapreduce.client.genericoptionsparser.used
-*---+---+
-|mapred.userlog.limit.kb | mapreduce.task.userlog.limit.kb
-*---+---+
-|mapred.userlog.retain.hours | mapreduce.job.userlog.retain.hours
-*---+---+
-|mapred.work.output.dir | mapreduce.task.output.dir
-*---+---+
-|mapred.working.dir | mapreduce.job.working.dir
-*---+---+
 |mapreduce.combine.class | mapreduce.job.combine.class
 *---+---+
 |mapreduce.inputformat.class | mapreduce.job.inputformat.class
 *---+---+
+|mapreduce.job.counters.limit | mapreduce.job.counters.max
+*---+---+
 |mapreduce.jobtracker.permissions.supergroup | mapreduce.cluster.permissions.supergroup
 *---+---+
 |mapreduce.map.class | mapreduce.job.map.class
@@ -504,6 +494,16 @@ Deprecated Properties
 *---+---+
 |mapreduce.reduce.class | mapreduce.job.reduce.class
 *---+---+
+|mapred.used.genericoptionsparser | mapreduce.client.genericoptionsparser.used
+*---+---+
+|mapred.userlog.limit.kb | mapreduce.task.userlog.limit.kb
+*---+---+
+|mapred.userlog.retain.hours | mapreduce.job.userlog.retain.hours
+*---+---+
+|mapred.working.dir | mapreduce.job.working.dir
+*---+---+
+|mapred.work.output.dir | mapreduce.task.output.dir
+*---+---+
 |min.num.spills.for.combine | mapreduce.map.combine.minspills
 *---+---+
 |reduce.output.key.value.fields.spec | mapreduce.fieldsel.reduce.output.key.value.fields.spec
@@ -537,4 +537,14 @@ Deprecated Properties
 |user.name | mapreduce.job.user.name
 *---+---+
 |webinterface.private.actions | mapreduce.jobtracker.webinterface.trusted
+*---+---+
+
+  The following table lists additional changes to some configuration properties:
+
+*-------------------------------+-----------------------+
+|| <<Deprecated property name>> || <<New property name>>|
+*-------------------------------+-----------------------+
+|mapred.create.symlink | NONE - symlinking is always on
+*---+---+
+|mapreduce.job.cache.symlink.create | NONE - symlinking is always on
 *---+---+
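  A quick way to see the old-to-new mapping in action: once a deprecation is registered (the framework code normally does this at class-initialization time, and the new HdfsConfiguration.main further down dumps the full table), setting the old key makes the value visible under the new one. The sketch below registers the mapping explicitly so it runs standalone; the jobtracker address is only a placeholder.

    import org.apache.hadoop.conf.Configuration;

    public class DeprecatedKeyExample {
      public static void main(String[] args) {
        // Register the mapping the same way the framework does internally.
        Configuration.addDeprecation("mapred.job.tracker",
            new String[] { "mapreduce.jobtracker.address" });

        Configuration conf = new Configuration(false);
        conf.set("mapred.job.tracker", "jt.example.com:8021");

        // The old key resolves to the new one; a deprecation warning is logged.
        System.out.println(conf.get("mapreduce.jobtracker.address"));
      }
    }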

+ 193 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java

@@ -0,0 +1,193 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.shell;
+
+import static org.junit.Assert.*;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.InputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.lang.reflect.Method;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+/**
+ * This class tests the logic for displaying the binary formats supported
+ * by the Text command.
+ */
+public class TestTextCommand {
+  private static final String TEST_ROOT_DIR =
+    System.getProperty("test.build.data", "build/test/data/") + "/testText";
+  private static final String AVRO_FILENAME = TEST_ROOT_DIR + "/weather.avro";
+
+  /**
+   * Tests whether binary Avro data files are displayed correctly.
+   */
+  @Test
+  public void testDisplayForAvroFiles() throws Exception {
+    // Create a small Avro data file on the local file system.
+    createAvroFile(generateWeatherAvroBinaryData());
+
+    // Prepare and call the Text command's protected getInputStream method
+    // using reflection.
+    Configuration conf = new Configuration();
+    File localPath = new File(AVRO_FILENAME);
+    PathData pathData = new PathData(localPath, conf);
+    Display.Text text = new Display.Text();
+    text.setConf(conf);
+    Method method = text.getClass().getDeclaredMethod(
+      "getInputStream", PathData.class);
+    method.setAccessible(true);
+    InputStream stream = (InputStream) method.invoke(text, pathData);
+    String output = inputStreamToString(stream);
+
+    // Check the output.
+    String expectedOutput =
+      "{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" +
+      System.getProperty("line.separator") +
+      "{\"station\":\"011990-99999\",\"time\":-619506000000,\"temp\":22}" +
+      System.getProperty("line.separator") +
+      "{\"station\":\"011990-99999\",\"time\":-619484400000,\"temp\":-11}" +
+      System.getProperty("line.separator") +
+      "{\"station\":\"012650-99999\",\"time\":-655531200000,\"temp\":111}" +
+      System.getProperty("line.separator") +
+      "{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}" +
+      System.getProperty("line.separator");
+
+    assertEquals(expectedOutput, output);
+  }
+
+  private String inputStreamToString(InputStream stream) throws IOException {
+    StringWriter writer = new StringWriter();
+    IOUtils.copy(stream, writer);
+    return writer.toString();
+  }
+
+  private void createAvroFile(byte[] contents) throws IOException {
+    (new File(TEST_ROOT_DIR)).mkdir();
+    File file = new File(AVRO_FILENAME);
+    file.createNewFile();
+    FileOutputStream stream = new FileOutputStream(file);
+    stream.write(contents);
+    stream.close();
+  }
+
+  private byte[] generateWeatherAvroBinaryData() {
+    // The contents of a simple binary Avro file with weather records.
+    byte[] contents = {
+      (byte) 0x4f, (byte) 0x62, (byte) 0x6a, (byte)  0x1,
+      (byte)  0x4, (byte) 0x14, (byte) 0x61, (byte) 0x76,
+      (byte) 0x72, (byte) 0x6f, (byte) 0x2e, (byte) 0x63,
+      (byte) 0x6f, (byte) 0x64, (byte) 0x65, (byte) 0x63,
+      (byte)  0x8, (byte) 0x6e, (byte) 0x75, (byte) 0x6c,
+      (byte) 0x6c, (byte) 0x16, (byte) 0x61, (byte) 0x76,
+      (byte) 0x72, (byte) 0x6f, (byte) 0x2e, (byte) 0x73,
+      (byte) 0x63, (byte) 0x68, (byte) 0x65, (byte) 0x6d,
+      (byte) 0x61, (byte) 0xf2, (byte)  0x2, (byte) 0x7b,
+      (byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x72, (byte) 0x65, (byte) 0x63, (byte) 0x6f,
+      (byte) 0x72, (byte) 0x64, (byte) 0x22, (byte) 0x2c,
+      (byte) 0x22, (byte) 0x6e, (byte) 0x61, (byte) 0x6d,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x57, (byte) 0x65, (byte) 0x61, (byte) 0x74,
+      (byte) 0x68, (byte) 0x65, (byte) 0x72, (byte) 0x22,
+      (byte) 0x2c, (byte) 0x22, (byte) 0x6e, (byte) 0x61,
+      (byte) 0x6d, (byte) 0x65, (byte) 0x73, (byte) 0x70,
+      (byte) 0x61, (byte) 0x63, (byte) 0x65, (byte) 0x22,
+      (byte) 0x3a, (byte) 0x22, (byte) 0x74, (byte) 0x65,
+      (byte) 0x73, (byte) 0x74, (byte) 0x22, (byte) 0x2c,
+      (byte) 0x22, (byte) 0x66, (byte) 0x69, (byte) 0x65,
+      (byte) 0x6c, (byte) 0x64, (byte) 0x73, (byte) 0x22,
+      (byte) 0x3a, (byte) 0x5b, (byte) 0x7b, (byte) 0x22,
+      (byte) 0x6e, (byte) 0x61, (byte) 0x6d, (byte) 0x65,
+      (byte) 0x22, (byte) 0x3a, (byte) 0x22, (byte) 0x73,
+      (byte) 0x74, (byte) 0x61, (byte) 0x74, (byte) 0x69,
+      (byte) 0x6f, (byte) 0x6e, (byte) 0x22, (byte) 0x2c,
+      (byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x73, (byte) 0x74, (byte) 0x72, (byte) 0x69,
+      (byte) 0x6e, (byte) 0x67, (byte) 0x22, (byte) 0x7d,
+      (byte) 0x2c, (byte) 0x7b, (byte) 0x22, (byte) 0x6e,
+      (byte) 0x61, (byte) 0x6d, (byte) 0x65, (byte) 0x22,
+      (byte) 0x3a, (byte) 0x22, (byte) 0x74, (byte) 0x69,
+      (byte) 0x6d, (byte) 0x65, (byte) 0x22, (byte) 0x2c,
+      (byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x6c, (byte) 0x6f, (byte) 0x6e, (byte) 0x67,
+      (byte) 0x22, (byte) 0x7d, (byte) 0x2c, (byte) 0x7b,
+      (byte) 0x22, (byte) 0x6e, (byte) 0x61, (byte) 0x6d,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x74, (byte) 0x65, (byte) 0x6d, (byte) 0x70,
+      (byte) 0x22, (byte) 0x2c, (byte) 0x22, (byte) 0x74,
+      (byte) 0x79, (byte) 0x70, (byte) 0x65, (byte) 0x22,
+      (byte) 0x3a, (byte) 0x22, (byte) 0x69, (byte) 0x6e,
+      (byte) 0x74, (byte) 0x22, (byte) 0x7d, (byte) 0x5d,
+      (byte) 0x2c, (byte) 0x22, (byte) 0x64, (byte) 0x6f,
+      (byte) 0x63, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x41, (byte) 0x20, (byte) 0x77, (byte) 0x65,
+      (byte) 0x61, (byte) 0x74, (byte) 0x68, (byte) 0x65,
+      (byte) 0x72, (byte) 0x20, (byte) 0x72, (byte) 0x65,
+      (byte) 0x61, (byte) 0x64, (byte) 0x69, (byte) 0x6e,
+      (byte) 0x67, (byte) 0x2e, (byte) 0x22, (byte) 0x7d,
+      (byte)  0x0, (byte) 0xb0, (byte) 0x81, (byte) 0xb3,
+      (byte) 0xc4, (byte)  0xa, (byte)  0xc, (byte) 0xf6,
+      (byte) 0x62, (byte) 0xfa, (byte) 0xc9, (byte) 0x38,
+      (byte) 0xfd, (byte) 0x7e, (byte) 0x52, (byte)  0x0,
+      (byte) 0xa7, (byte)  0xa, (byte) 0xcc, (byte)  0x1,
+      (byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
+      (byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
+      (byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0xff, (byte) 0xa3, (byte) 0x90,
+      (byte) 0xe8, (byte) 0x87, (byte) 0x24, (byte)  0x0,
+      (byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
+      (byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
+      (byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0xff, (byte) 0x81, (byte) 0xfb,
+      (byte) 0xd6, (byte) 0x87, (byte) 0x24, (byte) 0x2c,
+      (byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
+      (byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
+      (byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0xff, (byte) 0xa5, (byte) 0xae,
+      (byte) 0xc2, (byte) 0x87, (byte) 0x24, (byte) 0x15,
+      (byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x32,
+      (byte) 0x36, (byte) 0x35, (byte) 0x30, (byte) 0x2d,
+      (byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0xff, (byte) 0xb7, (byte) 0xa2,
+      (byte) 0x8b, (byte) 0x94, (byte) 0x26, (byte) 0xde,
+      (byte)  0x1, (byte) 0x18, (byte) 0x30, (byte) 0x31,
+      (byte) 0x32, (byte) 0x36, (byte) 0x35, (byte) 0x30,
+      (byte) 0x2d, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0x39, (byte) 0xff, (byte) 0xdb,
+      (byte) 0xd5, (byte) 0xf6, (byte) 0x93, (byte) 0x26,
+      (byte) 0x9c, (byte)  0x1, (byte) 0xb0, (byte) 0x81,
+      (byte) 0xb3, (byte) 0xc4, (byte)  0xa, (byte)  0xc,
+      (byte) 0xf6, (byte) 0x62, (byte) 0xfa, (byte) 0xc9,
+      (byte) 0x38, (byte) 0xfd, (byte) 0x7e, (byte) 0x52,
+      (byte)  0x0, (byte) 0xa7,
+    };
+
+    return contents;
+  }
+}
+

+ 23 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java

@@ -35,6 +35,7 @@ import javax.servlet.http.HttpServletRequest;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
 
 public class TestServletFilter extends HttpServerFunctionalTest {
@@ -163,7 +164,7 @@ public class TestServletFilter extends HttpServerFunctionalTest {
   @Test
   public void testServletFilterWhenInitThrowsException() throws Exception {
     Configuration conf = new Configuration();
-    // start a http server with CountingFilter
+    // start a http server with ErrorFilter
     conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
         ErrorFilter.Initializer.class.getName());
     HttpServer http = createTestServer(conf);
@@ -174,4 +175,25 @@ public class TestServletFilter extends HttpServerFunctionalTest {
       assertTrue( e.getMessage().contains("Problem in starting http server. Server handlers failed"));
     }
   }
+  
+  /**
+   * Similar to the above test case, except that it uses a different API to add the
+   * filter. Regression test for HADOOP-8786.
+   */
+  @Test
+  public void testContextSpecificServletFilterWhenInitThrowsException()
+      throws Exception {
+    Configuration conf = new Configuration();
+    HttpServer http = createTestServer(conf);
+    http.defineFilter(http.webAppContext,
+        "ErrorFilter", ErrorFilter.class.getName(),
+        null, null);
+    try {
+      http.start();
+      fail("expecting exception");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Unable to initialize WebAppContext", e);
+    }
+  }
+
 }

+ 181 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java

@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.test;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.notification.Failure;
+
+public class TestTimedOutTestsListener {
+
+  public static class Deadlock {
+    private CyclicBarrier barrier = new CyclicBarrier(6);
+  
+    public Deadlock() {
+      DeadlockThread[] dThreads = new DeadlockThread[6];
+  
+      Monitor a = new Monitor("a");
+      Monitor b = new Monitor("b");
+      Monitor c = new Monitor("c");
+      dThreads[0] = new DeadlockThread("MThread-1", a, b);
+      dThreads[1] = new DeadlockThread("MThread-2", b, c);
+      dThreads[2] = new DeadlockThread("MThread-3", c, a);
+  
+      Lock d = new ReentrantLock();
+      Lock e = new ReentrantLock();
+      Lock f = new ReentrantLock();
+  
+      dThreads[3] = new DeadlockThread("SThread-4", d, e);
+      dThreads[4] = new DeadlockThread("SThread-5", e, f);
+      dThreads[5] = new DeadlockThread("SThread-6", f, d);
+  
+      // make them daemon threads so that the test will exit
+      for (int i = 0; i < 6; i++) {
+        dThreads[i].setDaemon(true);
+        dThreads[i].start();
+      }
+    }
+  
+    class DeadlockThread extends Thread {
+      private Lock lock1 = null;
+  
+      private Lock lock2 = null;
+  
+      private Monitor mon1 = null;
+  
+      private Monitor mon2 = null;
+  
+      private boolean useSync;
+  
+      DeadlockThread(String name, Lock lock1, Lock lock2) {
+        super(name);
+        this.lock1 = lock1;
+        this.lock2 = lock2;
+        this.useSync = true;
+      }
+  
+      DeadlockThread(String name, Monitor mon1, Monitor mon2) {
+        super(name);
+        this.mon1 = mon1;
+        this.mon2 = mon2;
+        this.useSync = false;
+      }
+  
+      public void run() {
+        if (useSync) {
+          syncLock();
+        } else {
+          monitorLock();
+        }
+      }
+  
+      private void syncLock() {
+        lock1.lock();
+        try {
+          try {
+            barrier.await();
+          } catch (Exception e) {
+          }
+          goSyncDeadlock();
+        } finally {
+          lock1.unlock();
+        }
+      }
+  
+      private void goSyncDeadlock() {
+        try {
+          barrier.await();
+        } catch (Exception e) {
+        }
+        lock2.lock();
+        throw new RuntimeException("should not reach here.");
+      }
+  
+      private void monitorLock() {
+        synchronized (mon1) {
+          try {
+            barrier.await();
+          } catch (Exception e) {
+          }
+          goMonitorDeadlock();
+        }
+      }
+  
+      private void goMonitorDeadlock() {
+        try {
+          barrier.await();
+        } catch (Exception e) {
+        }
+        synchronized (mon2) {
+          throw new RuntimeException(getName() + " should not reach here.");
+        }
+      }
+    }
+  
+    class Monitor {
+      String name;
+  
+      Monitor(String name) {
+        this.name = name;
+      }
+    }
+  
+  }
+
+  @Test(timeout=500)
+  public void testThreadDumpAndDeadlocks() throws Exception {
+    new Deadlock();
+    String s = null;
+    while (true) {
+      s = TimedOutTestsListener.buildDeadlockInfo();
+      if (s != null)
+        break;
+      Thread.sleep(100);
+    }
+    
+    Assert.assertEquals(3, countStringOccurrences(s, "BLOCKED"));
+    
+    Failure failure = new Failure(
+        null, new Exception(TimedOutTestsListener.TEST_TIMED_OUT_PREFIX));
+    StringWriter writer = new StringWriter();
+    new TimedOutTestsListener(new PrintWriter(writer)).testFailure(failure);
+    String out = writer.toString();
+    
+    Assert.assertTrue(out.contains("THREAD DUMP"));
+    Assert.assertTrue(out.contains("DEADLOCKS DETECTED"));
+    
+    System.out.println(out);
+  }
+
+  private int countStringOccurrences(String s, String substr) {
+    int n = 0;
+    int index = 0;
+    while ((index = s.indexOf(substr, index) + 1) != 0) {
+      n++;
+    }
+    return n;
+  }
+
+}

+ 166 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TimedOutTestsListener.java

@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.test;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.lang.management.LockInfo;
+import java.lang.management.ManagementFactory;
+import java.lang.management.MonitorInfo;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Map;
+
+import org.junit.runner.notification.Failure;
+import org.junit.runner.notification.RunListener;
+
+/**
+ * JUnit run listener which prints full thread dump into System.err
+ * in case a test is failed due to timeout.
+ */
+public class TimedOutTestsListener extends RunListener {
+
+  static final String TEST_TIMED_OUT_PREFIX = "test timed out after";
+  
+  private static String INDENT = "    ";
+
+  private final PrintWriter output;
+  
+  public TimedOutTestsListener() {
+    this.output = new PrintWriter(System.err);
+  }
+  
+  public TimedOutTestsListener(PrintWriter output) {
+    this.output = output;
+  }
+
+  @Override
+  public void testFailure(Failure failure) throws Exception {
+    if (failure != null && failure.getMessage() != null 
+        && failure.getMessage().startsWith(TEST_TIMED_OUT_PREFIX)) {
+      output.println("====> TEST TIMED OUT. PRINTING THREAD DUMP. <====");
+      output.println();
+      DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss,SSS");
+      output.println(String.format("Timestamp: %s", dateFormat.format(new Date())));
+      output.println();
+      output.println(buildThreadDump());
+      
+      String deadlocksInfo = buildDeadlockInfo();
+      if (deadlocksInfo != null) {
+        output.println("====> DEADLOCKS DETECTED <====");
+        output.println();
+        output.println(deadlocksInfo);
+      }
+    }
+  }
+
+  static String buildThreadDump() {
+    StringBuilder dump = new StringBuilder();
+    Map<Thread, StackTraceElement[]> stackTraces = Thread.getAllStackTraces();
+    for (Map.Entry<Thread, StackTraceElement[]> e : stackTraces.entrySet()) {
+      Thread thread = e.getKey();
+      dump.append(String.format(
+          "\"%s\" %s prio=%d tid=%d %s\njava.lang.Thread.State: %s",
+          thread.getName(),
+          (thread.isDaemon() ? "daemon" : ""),
+          thread.getPriority(),
+          thread.getId(),
+          Thread.State.WAITING.equals(thread.getState()) ? 
+              "in Object.wait()" : thread.getState().name().toLowerCase(),
+          Thread.State.WAITING.equals(thread.getState()) ? 
+              "WAITING (on object monitor)" : thread.getState()));
+      for (StackTraceElement stackTraceElement : e.getValue()) {
+        dump.append("\n        at ");
+        dump.append(stackTraceElement);
+      }
+      dump.append("\n");
+    }
+    return dump.toString();
+  }
+  
+  static String buildDeadlockInfo() {
+    ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
+    long[] threadIds = threadBean.findMonitorDeadlockedThreads();
+    if (threadIds != null && threadIds.length > 0) {
+      StringWriter stringWriter = new StringWriter();
+      PrintWriter out = new PrintWriter(stringWriter);
+      
+      ThreadInfo[] infos = threadBean.getThreadInfo(threadIds, true, true);
+      for (ThreadInfo ti : infos) {
+        printThreadInfo(ti, out);
+        printLockInfo(ti.getLockedSynchronizers(), out);
+        out.println();
+      }
+      
+      out.close();
+      return stringWriter.toString();
+    } else {
+      return null;
+    }
+  }
+  
+  private static void printThreadInfo(ThreadInfo ti, PrintWriter out) {
+    // print thread information
+    printThread(ti, out);
+
+    // print stack trace with locks
+    StackTraceElement[] stacktrace = ti.getStackTrace();
+    MonitorInfo[] monitors = ti.getLockedMonitors();
+    for (int i = 0; i < stacktrace.length; i++) {
+      StackTraceElement ste = stacktrace[i];
+      out.println(INDENT + "at " + ste.toString());
+      for (MonitorInfo mi : monitors) {
+        if (mi.getLockedStackDepth() == i) {
+          out.println(INDENT + "  - locked " + mi);
+        }
+      }
+    }
+    out.println();
+  }
+
+  private static void printThread(ThreadInfo ti, PrintWriter out) {
+    out.print("\"" + ti.getThreadName() + "\"" + " Id="
+        + ti.getThreadId() + " in " + ti.getThreadState());
+    if (ti.getLockName() != null) {
+      out.print(" on lock=" + ti.getLockName());
+    }
+    if (ti.isSuspended()) {
+      out.print(" (suspended)");
+    }
+    if (ti.isInNative()) {
+      out.print(" (running in native)");
+    }
+    out.println();
+    if (ti.getLockOwnerName() != null) {
+      out.println(INDENT + " owned by " + ti.getLockOwnerName() + " Id="
+          + ti.getLockOwnerId());
+    }
+  }
+
+  private static void printLockInfo(LockInfo[] locks, PrintWriter out) {
+    out.println(INDENT + "Locked synchronizers: count = " + locks.length);
+    for (LockInfo li : locks) {
+      out.println(INDENT + "  - " + li);
+    }
+    out.println();
+  }
+  
+}
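  The listener is wired into the builds through the surefire <properties> blocks added to the HttpFS and HDFS pom.xml files further down. For a quick local experiment it can also be attached programmatically; a minimal sketch (the class and test names here are just examples):

    import org.junit.runner.JUnitCore;
    import org.apache.hadoop.test.TestTimedOutTestsListener;
    import org.apache.hadoop.test.TimedOutTestsListener;

    public class RunWithTimeoutListener {
      public static void main(String[] args) {
        JUnitCore core = new JUnitCore();
        // Prints a full thread dump (plus deadlock info, if any) whenever a
        // failure message starts with "test timed out after".
        core.addListener(new TimedOutTestsListener());
        core.run(TestTimedOutTestsListener.class);
      }
    }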

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -301,6 +301,12 @@
             <java.security.krb5.conf>${project.build.directory}/test-classes/krb5.conf</java.security.krb5.conf>
             <kerberos.realm>${kerberos.realm}</kerberos.realm>
           </systemPropertyVariables>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
           <excludes>
             <exclude>**/${test.exclude}.java</exclude>
             <exclude>${test.exclude.pattern}</exclude>

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java

@@ -181,7 +181,7 @@ public abstract class ServerWebApp extends Server implements ServletContextListe
       throw new ServerException(ServerException.ERROR.S13, portKey);
     }
     try {
-      InetAddress add = InetAddress.getByName(hostnameKey);
+      InetAddress add = InetAddress.getByName(host);
       int portNum = Integer.parseInt(port);
       return new InetSocketAddress(add, portNum);
     } catch (UnknownHostException ex) {
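  The HDFS-3944 fix above is easy to misread in isolation: the old code passed the name of the configuration property to the resolver instead of the value read from it. A standalone illustration (the key name shown is taken from the new TestServerWebApp test further down and is otherwise hypothetical):

    import java.net.InetAddress;
    import java.net.InetSocketAddress;

    public class ResolveAuthoritySketch {
      public static void main(String[] args) throws Exception {
        String hostnameKey = "testserverwebapp3.http.hostname"; // property *name*
        String host = "localhost";                              // property *value*

        // Old behaviour: InetAddress.getByName(hostnameKey) tries to resolve the
        // literal key string and fails with UnknownHostException.
        // Fixed behaviour: resolve the configured value instead.
        InetSocketAddress addr =
            new InetSocketAddress(InetAddress.getByName(host), 14000);
        System.out.println(addr);
      }
    }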

+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm

@@ -81,8 +81,3 @@ Hadoop HDFS over HTTP - Documentation Sets ${project.version}
 
 
   * {{{./UsingHttpTools.html}Using HTTP Tools}}
 
 
-* Current Limitations
-
-  <<<GETDELEGATIONTOKEN, RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN>>>
-  operations are not supported.
-

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java

@@ -24,8 +24,11 @@ import org.apache.hadoop.lib.server.Server;
 import org.apache.hadoop.test.HTestCase;
 import org.apache.hadoop.test.TestDir;
 import org.apache.hadoop.test.TestDirHelper;
+import org.junit.Assert;
 import org.junit.Test;
 
 
+import java.net.InetSocketAddress;
+
 public class TestServerWebApp extends HTestCase {
 
 
   @Test(expected = IllegalArgumentException.class)
@@ -74,4 +77,23 @@ public class TestServerWebApp extends HTestCase {
 
 
     server.contextInitialized(null);
   }
+
+  @Test
+  @TestDir
+  public void testResolveAuthority() throws Exception {
+    String dir = TestDirHelper.getTestDir().getAbsolutePath();
+    System.setProperty("TestServerWebApp3.home.dir", dir);
+    System.setProperty("TestServerWebApp3.config.dir", dir);
+    System.setProperty("TestServerWebApp3.log.dir", dir);
+    System.setProperty("TestServerWebApp3.temp.dir", dir);
+    System.setProperty("testserverwebapp3.http.hostname", "localhost");
+    System.setProperty("testserverwebapp3.http.port", "14000");
+    ServerWebApp server = new ServerWebApp("TestServerWebApp3") {
+    };
+
+    InetSocketAddress address = server.resolveAuthority();
+    Assert.assertEquals("localhost", address.getHostName());
+    Assert.assertEquals(14000, address.getPort());
+  }
+
 }

+ 33 - 1
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -16,6 +16,10 @@ Trunk (Unreleased)
     HDFS-3601. Add BlockPlacementPolicyWithNodeGroup to support block placement
     with 4-layer network topology.  (Junping Du via szetszwo)
 
 
+    HDFS-3703. Datanodes are marked stale if heartbeat is not received in
+    configured timeout and are selected as the last location to read from.
+    (Jing Zhao via suresh)
+
   IMPROVEMENTS
 
 
     HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
@@ -225,11 +229,23 @@ Release 2.0.3-alpha - Unreleased
     (Jaimin D Jetly and Jing Zhao via szetszwo)
 
 
   IMPROVEMENTS
+  
+    HDFS-3925. Prettify PipelineAck#toString() for printing to a log
+    (Andrew Wang via todd)
+
+    HDFS-3939. NN RPC address cleanup. (eli)
 
 
   OPTIMIZATIONS
 
 
   BUG FIXES
 
 
+    HDFS-3919. MiniDFSCluster:waitClusterUp can hang forever.
+    (Andy Isaacson via eli)
+
+    HDFS-3924. Multi-byte id in HdfsVolumeId. (Andrew Wang via atm)
+
+    HDFS-3936. MiniDFSCluster shutdown races with BlocksMap usage. (eli)
+
 Release 2.0.2-alpha - 2012-09-07 
 
 
   INCOMPATIBLE CHANGES
@@ -464,6 +480,10 @@ Release 2.0.2-alpha - 2012-09-07
 
 
     HDFS-3888. Clean up BlockPlacementPolicyDefault.  (Jing Zhao via szetszwo)
 
 
+    HDFS-3907. Allow multiple users for local block readers. (eli)
+
+    HDFS-3910. DFSTestUtil#waitReplication should timeout. (eli)
+
   OPTIMIZATIONS
 
 
     HDFS-2982. Startup performance suffers when there are many edit log
@@ -750,7 +770,16 @@ Release 2.0.2-alpha - 2012-09-07
 
 
     HDFS-2757. Cannot read a local block that's being written to when
     using the local read short circuit. (Jean-Daniel Cryans via eli)
-    
+
+    HDFS-3664. BlockManager race when stopping active services.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3928. MiniDFSCluster should reset the first ExitException on shutdown. (eli)
+   
+    HDFS-3938. remove current limitations from HttpFS docs. (tucu)
+
+    HDFS-3944. Httpfs resolveAuthority() is not resolving host correctly. (tucu)
+ 
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
@@ -770,6 +799,9 @@ Release 2.0.2-alpha - 2012-09-07
     HDFS-3833. TestDFSShell fails on windows due to concurrent file 
     read/write. (Brandon Li via suresh)
 
 
+    HDFS-3902. TestDatanodeBlockScanner#testBlockCorruptionPolicy is broken.
+    (Andy Isaacson via eli)
+
 Release 2.0.0-alpha - 05-23-2012
 
 
   INCOMPATIBLE CHANGES

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -189,6 +189,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <startKdc>${startKdc}</startKdc>
             <kdc.resource.dir>${kdc.resource.dir}</kdc.resource.dir>
           </systemPropertyVariables>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
         </configuration>
       </plugin>
       <plugin>

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
 
+import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,10 +32,10 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Public
 public class HdfsVolumeId implements VolumeId {
 
 
-  private final byte id;
+  private final byte[] id;
   private final boolean isValid;
 
 
-  public HdfsVolumeId(byte id, boolean isValid) {
+  public HdfsVolumeId(byte[] id, boolean isValid) {
     this.id = id;
     this.isValid = isValid;
   }
@@ -69,6 +70,6 @@ public class HdfsVolumeId implements VolumeId {
 
 
   @Override
   public String toString() {
-    return Byte.toString(id);
+    return Base64.encodeBase64String(id);
   }
 }
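  HDFS-3924 (listed in CHANGES.txt above) widens the volume id from a single byte to a byte array; Base64 gives a printable form for an identifier of arbitrary length. A small before/after sketch, with made-up byte values:

    import org.apache.commons.codec.binary.Base64;

    public class VolumeIdToStringSketch {
      public static void main(String[] args) {
        byte[] id = new byte[] { 0x1f, 0x2e, 0x3d, 0x4c };

        // Old behaviour: only a single byte was representable.
        System.out.println(Byte.toString(id[0]));          // 31

        // New behaviour: the whole identifier, Base64-encoded.
        System.out.println(Base64.encodeBase64String(id)); // Hy49TA==
      }
    }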

+ 6 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs;
 
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -201,7 +202,7 @@ class BlockStorageLocationUtil {
       ArrayList<VolumeId> l = new ArrayList<VolumeId>(b.getLocations().length);
       // Start off all IDs as invalid, fill it in later with results from RPCs
       for (int i = 0; i < b.getLocations().length; i++) {
-        l.add(new HdfsVolumeId((byte)-1, false));
+        l.add(new HdfsVolumeId(null, false));
       }
       blockVolumeIds.put(b, l);
     }
@@ -234,8 +235,8 @@ class BlockStorageLocationUtil {
         }
         // Get the VolumeId by indexing into the list of VolumeIds
         // provided by the datanode
-        HdfsVolumeId id = new HdfsVolumeId(metaVolumeIds.get(volumeIndex)[0],
-            true);
+        byte[] volumeId = metaVolumeIds.get(volumeIndex);
+        HdfsVolumeId id = new HdfsVolumeId(volumeId, true);
         // Find out which index we are in the LocatedBlock's replicas
         LocatedBlock locBlock = extBlockToLocBlock.get(extBlock);
         DatanodeInfo[] dnInfos = locBlock.getLocations();
@@ -255,8 +256,8 @@ class BlockStorageLocationUtil {
         }
         // Place VolumeId at the same index as the DN's index in the list of
         // replicas
-        List<VolumeId> VolumeIds = blockVolumeIds.get(locBlock);
-        VolumeIds.set(index, id);
+        List<VolumeId> volumeIds = blockVolumeIds.get(locBlock);
+        volumeIds.set(index, id);
       }
     }
     return blockVolumeIds;

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -174,6 +174,13 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_DATANODE_SYNCONCLOSE_DEFAULT = false;
   public static final String  DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive";
   public static final int     DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000;
+  
+  // Whether to enable datanode's stale state detection and usage
+  public static final String DFS_NAMENODE_CHECK_STALE_DATANODE_KEY = "dfs.namenode.check.stale.datanode";
+  public static final boolean DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT = false;
+  // The default value of the time interval for marking datanodes as stale
+  public static final String DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY = "dfs.namenode.stale.datanode.interval";
+  public static final long DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT = 30 * 1000; // 30s
 
 
   // Replication monitoring related keys
   public static final String DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION =
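  These two keys are consumed by DatanodeManager (see the hunk further down): the boolean switches the staleness check on, and the interval is read with the 30-second default. A minimal sketch of reading them from a Configuration, assuming only the key names defined above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class StaleConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, 45 * 1000L);

        boolean check = conf.getBoolean(
            DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY,
            DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT);
        long interval = conf.getLong(
            DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
            DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT);
        System.out.println(check + ", " + interval + "ms");
      }
    }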

+ 37 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -128,6 +128,43 @@ public class DFSUtil {
           a.isDecommissioned() ? 1 : -1;
       }
     };
+    
+      
+  /**
+   * Comparator for sorting DataNodeInfo[] based on decommissioned/stale states.
+   * Decommissioned/stale nodes are moved to the end of the array on sorting
+   * with this comparator.
+   */ 
+  @InterfaceAudience.Private 
+  public static class DecomStaleComparator implements Comparator<DatanodeInfo> {
+    private long staleInterval;
+
+    /**
+     * Constructor of DecomStaleComparator
+     * 
+     * @param interval
+     *          The time interval for marking datanodes as stale is passed from
+     *          outside, since the interval may be changed dynamically
+     */
+    public DecomStaleComparator(long interval) {
+      this.staleInterval = interval;
+    }
+
+    @Override
+    public int compare(DatanodeInfo a, DatanodeInfo b) {
+      // Decommissioned nodes will still be moved to the end of the list
+      if (a.isDecommissioned()) {
+        return b.isDecommissioned() ? 0 : 1;
+      } else if (b.isDecommissioned()) {
+        return -1;
+      }
+      // Stale nodes will be moved behind the normal nodes
+      boolean aStale = a.isStale(staleInterval);
+      boolean bStale = b.isStale(staleInterval);
+      return aStale == bStale ? 0 : (aStale ? 1 : -1);
+    }
+  }    
+    
   /**
    * Address matcher for matching an address to local address
    */
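  The ordering rule the comparator implements is: decommissioned replicas always sort last, and among the rest, stale replicas sort after healthy ones. A self-contained sketch with a stand-in node class (not the real DatanodeInfo) shows the effect:

    import java.util.Arrays;
    import java.util.Comparator;

    public class StaleSortSketch {
      // Simplified stand-in for DatanodeInfo, for illustration only.
      static class Node {
        final String name; final boolean decommissioned; final boolean stale;
        Node(String name, boolean decommissioned, boolean stale) {
          this.name = name; this.decommissioned = decommissioned; this.stale = stale;
        }
        @Override public String toString() { return name; }
      }

      public static void main(String[] args) {
        // Same comparison logic as DecomStaleComparator above.
        Comparator<Node> cmp = new Comparator<Node>() {
          @Override public int compare(Node a, Node b) {
            if (a.decommissioned) { return b.decommissioned ? 0 : 1; }
            if (b.decommissioned) { return -1; }
            return a.stale == b.stale ? 0 : (a.stale ? 1 : -1);
          }
        };
        Node[] replicas = { new Node("dn1-stale", false, true),
                            new Node("dn2-decom", true, false),
                            new Node("dn3-live", false, false) };
        Arrays.sort(replicas, cmp);
        System.out.println(Arrays.toString(replicas)); // [dn3-live, dn1-stale, dn2-decom]
      }
    }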

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java

@@ -105,4 +105,9 @@ public class HdfsConfiguration extends Configuration {
     deprecate("dfs.federation.nameservices", DFSConfigKeys.DFS_NAMESERVICES);
     deprecate("dfs.federation.nameservice.id", DFSConfigKeys.DFS_NAMESERVICE_ID);
   }
+
+  public static void main(String[] args) {
+    init();
+    Configuration.dumpDeprecatedKeys();
+  }
 }

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java

@@ -36,8 +36,6 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
@@ -67,6 +65,8 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.tools.GetUserMappingsProtocolPB;
+import org.apache.hadoop.tools.impl.pb.client.GetUserMappingsProtocolPBClientImpl;
 
 
 import com.google.common.base.Preconditions;
 
 
@@ -218,7 +218,7 @@ public class NameNodeProxies {
       throws IOException {
     GetUserMappingsProtocolPB proxy = (GetUserMappingsProtocolPB)
         createNameNodeProxy(address, conf, ugi, GetUserMappingsProtocolPB.class, 0);
-    return new GetUserMappingsProtocolClientSideTranslatorPB(proxy);
+    return new GetUserMappingsProtocolPBClientImpl(proxy);
   }
   
   
   private static NamenodeProtocol createNNProxyWithNamenodeProtocol(

+ 19 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 
 
 /** 
  * This class extends the primary identifier of a Datanode with ephemeral
@@ -321,7 +322,24 @@ public class DatanodeInfo extends DatanodeID implements Node {
     }
     return adminState;
   }
-
+ 
+  /**
+   * Check if the datanode is in stale state. Here if 
+   * the namenode has not received heartbeat msg from a 
+   * datanode for more than staleInterval (default value is
+   * {@link DFSConfigKeys#DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT}),
+   * the datanode will be treated as stale node.
+   * 
+   * @param staleInterval
+   *          the time interval for marking the node as stale. If the last
+   *          update time is beyond the given time interval, the node will be
+   *          marked as stale.
+   * @return true if the node is stale
+   */
+  public boolean isStale(long staleInterval) {
+    return (Time.now() - lastUpdate) >= staleInterval;
+  }
+  
   /**
    * Sets the admin state of this node.
    */
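  The check itself is just a clock comparison against the last heartbeat; a standalone sketch of the same logic, using the wall clock directly instead of org.apache.hadoop.util.Time:

    public class StalenessCheckSketch {
      // Same comparison as DatanodeInfo#isStale.
      static boolean isStale(long lastUpdate, long staleInterval) {
        return (System.currentTimeMillis() - lastUpdate) >= staleInterval;
      }

      public static void main(String[] args) throws InterruptedException {
        long staleInterval = 200L;                    // would come from configuration
        long lastUpdate = System.currentTimeMillis(); // heartbeat just received
        System.out.println(isStale(lastUpdate, staleInterval)); // false
        Thread.sleep(250);                            // no heartbeat for > interval
        System.out.println(isStale(lastUpdate, staleInterval)); // true
      }
    }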

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java

@@ -30,6 +30,8 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 
 
+import com.google.protobuf.TextFormat;
+
 /** Pipeline Acknowledgment **/
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -120,6 +122,6 @@ public class PipelineAck {
   
   
   @Override //Object
   public String toString() {
-    return proto.toString();
+    return TextFormat.shortDebugString(proto);
   }
 }
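  TextFormat.shortDebugString renders the same protobuf message on one line, which reads better inside a log statement than the multi-line default toString() (the motivation for HDFS-3925). A rough sketch, assuming the generated builder setters (setSeqno, addStatus) match the proto field names:

    import com.google.protobuf.TextFormat;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

    public class AckToStringSketch {
      public static void main(String[] args) {
        PipelineAckProto ack = PipelineAckProto.newBuilder()
            .setSeqno(42)
            .addStatus(Status.SUCCESS)
            .build();
        System.out.println(ack.toString());                   // multi-line text format
        System.out.println(TextFormat.shortDebugString(ack)); // e.g. seqno: 42 status: SUCCESS
      }
    }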

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -363,11 +363,10 @@ public class BlockManager {
         replicationThread.join(3000);
       }
     } catch (InterruptedException ie) {
-    } finally {
-      if (pendingReplications != null) pendingReplications.stop();
-      blocksMap.close();
-      datanodeManager.close();
     }
+    datanodeManager.close();
+    pendingReplications.stop();
+    blocksMap.close();
   }
 
 
   /** @return the datanodeManager */

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java

@@ -94,7 +94,7 @@ class BlocksMap {
   }
 
 
   void close() {
-    blocks = null;
+    // Empty blocks once GSet#clear is implemented (HDFS-3940)
   }
 
 
   BlockCollection getBlockCollection(Block b) {

+ 35 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -25,6 +25,7 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -132,6 +133,11 @@ public class DatanodeManager {
    */
   private boolean hasClusterEverBeenMultiRack = false;
   
   
+  /** Whether or not to check the stale datanodes */
+  private volatile boolean checkForStaleNodes;
+  /** The time interval for detecting stale datanodes */
+  private volatile long staleInterval;
+  
   DatanodeManager(final BlockManager blockManager,
       final Namesystem namesystem, final Configuration conf
       ) throws IOException {
@@ -175,6 +181,21 @@ public class DatanodeManager {
         DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
     LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
         + "=" + this.blockInvalidateLimit);
+    // set the value of stale interval based on configuration
+    this.checkForStaleNodes = conf.getBoolean(
+        DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY,
+        DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT);
+    if (this.checkForStaleNodes) {
+      this.staleInterval = conf.getLong(
+          DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
+          DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT);
+      if (this.staleInterval < DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT) {
+        LOG.warn("The given interval for marking stale datanode = "
+            + this.staleInterval + ", which is smaller than the default value "
+            + DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT
+            + ".");
+      }
+    }
   }
 
 
   private Daemon decommissionthread = null;
@@ -192,7 +213,13 @@ public class DatanodeManager {
   }
 
 
   void close() {
-    if (decommissionthread != null) decommissionthread.interrupt();
+    if (decommissionthread != null) {
+      decommissionthread.interrupt();
+      try {
+        decommissionthread.join(3000);
+      } catch (InterruptedException e) {
+      }
+    }
     heartbeatManager.close();
   }
 
 
@@ -225,14 +252,17 @@ public class DatanodeManager {
       if (rName != null)
         client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR + targethost);
     }
+    
+    Comparator<DatanodeInfo> comparator = checkForStaleNodes ? 
+                    new DFSUtil.DecomStaleComparator(staleInterval) : 
+                    DFSUtil.DECOM_COMPARATOR;
     for (LocatedBlock b : locatedblocks) {
       networktopology.pseudoSortByDistance(client, b.getLocations());
-      
-      // Move decommissioned datanodes to the bottom
-      Arrays.sort(b.getLocations(), DFSUtil.DECOM_COMPARATOR);
+      // Move decommissioned/stale datanodes to the bottom
+      Arrays.sort(b.getLocations(), comparator);
     }
   }
-
+  
   CyclicIteration<String, DatanodeDescriptor> getDatanodeCyclicIteration(
       final String firstkey) {
       final String firstkey) {
     return new CyclicIteration<String, DatanodeDescriptor>(
     return new CyclicIteration<String, DatanodeDescriptor>(
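
Editor's note: a minimal sketch, not part of the patch, of the ordering the DatanodeManager change above asks Arrays.sort to enforce: live replicas first, then stale ones, then decommissioned ones. The real comparator is DFSUtil.DecomStaleComparator; the ReplicaInfo type, its field names, and the rank() helper here are illustrative only.

import java.util.Arrays;
import java.util.Comparator;

class StaleAwareSortSketch {
  static final class ReplicaInfo {
    final boolean decommissioned;
    final long lastHeartbeatMillis;
    ReplicaInfo(boolean decommissioned, long lastHeartbeatMillis) {
      this.decommissioned = decommissioned;
      this.lastHeartbeatMillis = lastHeartbeatMillis;
    }
  }

  // 0 = live, 1 = stale, 2 = decommissioned, so sorting pushes bad replicas down.
  static int rank(ReplicaInfo r, long staleIntervalMillis, long nowMillis) {
    if (r.decommissioned) {
      return 2;
    }
    return (nowMillis - r.lastHeartbeatMillis > staleIntervalMillis) ? 1 : 0;
  }

  static void sortForRead(ReplicaInfo[] locations, final long staleIntervalMillis) {
    final long now = System.currentTimeMillis();
    // Arrays.sort is stable for objects, so the order produced by
    // pseudoSortByDistance is preserved within each rank.
    Arrays.sort(locations, new Comparator<ReplicaInfo>() {
      @Override
      public int compare(ReplicaInfo a, ReplicaInfo b) {
        return rank(a, staleIntervalMillis, now) - rank(b, staleIntervalMillis, now);
      }
    });
  }
}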

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java

@@ -74,6 +74,11 @@ class HeartbeatManager implements DatanodeStatistics {

  void close() {
    heartbeatThread.interrupt();
+    try {
+      // This will have no effect if the thread hasn't yet been started.
+      heartbeatThread.join(3000);
+    } catch (InterruptedException e) {
+    }
  }
  
  synchronized int getLiveDatanodeCount() {
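
Editor's note: the close() changes above (DatanodeManager and HeartbeatManager) share the same interrupt-then-bounded-join shutdown pattern. A self-contained sketch with an illustrative worker thread; only the 3000 ms bound is taken from the diff.

class BoundedShutdownSketch {
  private final Thread worker = new Thread(new Runnable() {
    @Override
    public void run() {
      while (!Thread.currentThread().isInterrupted()) {
        try {
          Thread.sleep(1000);   // stand-in for periodic work
        } catch (InterruptedException e) {
          return;               // exit promptly when interrupted
        }
      }
    }
  });

  void start() {
    worker.start();
  }

  void close() {
    worker.interrupt();
    try {
      worker.join(3000);        // bounded wait; a no-op if the thread never started
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();  // preserve the caller's interrupt status
    }
  }
}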

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java

@@ -374,7 +374,8 @@ class BlockPoolSliceScanner {
    throttler.setBandwidth(Math.min(bw, MAX_SCAN_RATE));
  }
  
-  private void verifyBlock(ExtendedBlock block) {
+  @VisibleForTesting
+  void verifyBlock(ExtendedBlock block) {
    BlockSender blockSender = null;

    /* In case of failure, attempt to read second time to reduce

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java

@@ -172,7 +172,8 @@ public class DataBlockScanner implements Runnable {
    return blockPoolScannerMap.size();
  }
  
-  private synchronized BlockPoolSliceScanner getBPScanner(String bpid) {
+  @VisibleForTesting
+  synchronized BlockPoolSliceScanner getBPScanner(String bpid) {
    return blockPoolScannerMap.get(bpid);
  }
  

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -277,7 +277,7 @@ public class DataNode extends Configured
  private AbstractList<File> dataDirs;
  private Configuration conf;

-  private final String userWithLocalPathAccess;
+  private final List<String> usersWithLocalPathAccess;
  private boolean connectToDnViaHostname;
  ReadaheadPool readaheadPool;
  private final boolean getHdfsBlockLocationsEnabled;
@@ -300,8 +300,8 @@ public class DataNode extends Configured
           final SecureResources resources) throws IOException {
    super(conf);

-    this.userWithLocalPathAccess =
-        conf.get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
+    this.usersWithLocalPathAccess = Arrays.asList(
+        conf.getTrimmedStrings(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
    this.connectToDnViaHostname = conf.getBoolean(
        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
@@ -1012,7 +1012,7 @@ public class DataNode extends Configured
  private void checkBlockLocalPathAccess() throws IOException {
    checkKerberosAuthMethod("getBlockLocalPathInfo()");
    String currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
-    if (!currentUser.equals(this.userWithLocalPathAccess)) {
+    if (!usersWithLocalPathAccess.contains(currentUser)) {
      throw new AccessControlException(
          "Can't continue with getBlockLocalPathInfo() "
              + "authorization. The user " + currentUser

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -24,6 +24,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -1676,10 +1677,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    List<byte[]> blocksVolumeIds = new ArrayList<byte[]>(volumes.volumes.size());
    // List of indexes into the list of VolumeIds, pointing at the VolumeId of
    // the volume that the block is on
-    List<Integer> blocksVolumendexes = new ArrayList<Integer>(blocks.size());
+    List<Integer> blocksVolumeIndexes = new ArrayList<Integer>(blocks.size());
    // Initialize the list of VolumeIds simply by enumerating the volumes
    for (int i = 0; i < volumes.volumes.size(); i++) {
-      blocksVolumeIds.add(new byte[] { (byte) i });
+      blocksVolumeIds.add(ByteBuffer.allocate(4).putInt(i).array());
    }
    // Determine the index of the VolumeId of each block's volume, by comparing 
    // the block's volume against the enumerated volumes
@@ -1700,10 +1701,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
      if (!isValid) {
        volumeIndex = Integer.MAX_VALUE;
      }
-      blocksVolumendexes.add(volumeIndex);
+      blocksVolumeIndexes.add(volumeIndex);
    }
    return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[] {}), 
-        blocksVolumeIds, blocksVolumendexes);
+        blocksVolumeIds, blocksVolumeIndexes);
  }

  @Override
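
Editor's note: the FsDatasetImpl hunk above widens the per-volume id from one byte to four. A small standalone sketch of the same ByteBuffer encoding plus the matching decode; the 200 used in main() is just an example index that would not fit in a signed byte.

import java.nio.ByteBuffer;

class VolumeIdSketch {
  // Encode a volume index as 4 big-endian bytes, as the patch does.
  static byte[] encode(int volumeIndex) {
    return ByteBuffer.allocate(4).putInt(volumeIndex).array();
  }

  // Recover the index from the 4-byte id.
  static int decode(byte[] volumeId) {
    return ByteBuffer.wrap(volumeId).getInt();
  }

  public static void main(String[] args) {
    byte[] id = encode(200);
    System.out.println(decode(id));  // prints 200
  }
}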

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -1173,6 +1173,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    if (blocks != null) {
      blockManager.getDatanodeManager().sortLocatedBlocks(
          clientMachine, blocks.getLocatedBlocks());
+      
+      LocatedBlock lastBlock = blocks.getLastLocatedBlock();
+      if (lastBlock != null) {
+        ArrayList<LocatedBlock> lastBlockList = new ArrayList<LocatedBlock>();
+        lastBlockList.add(lastBlock);
+        blockManager.getDatanodeManager().sortLocatedBlocks(
+                              clientMachine, lastBlockList);
+      }
    }
    return blocks;
  }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java

@@ -71,7 +71,7 @@ public class FileChecksumServlets {
        String tokenString = ugi.getTokens().iterator().next().encodeToUrlString();
        dtParam = JspHelper.getDelegationTokenUrlParam(tokenString);
      }
-      String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
+      String addr = nn.getNameNodeAddressHostPortString();
      String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);

      return new URL(scheme, hostname, port, 

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java

@@ -34,7 +34,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ServletUtil;
 
@@ -74,7 +73,7 @@ public class FileDataServlet extends DfsServlet {
    // Add namenode address to the url params
    NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
        getServletContext());
-    String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
+    String addr = nn.getNameNodeAddressHostPortString();
    String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
    
    return new URL(scheme, hostname, port,

+ 22 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.ServiceFailedException;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Trash;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
@@ -489,9 +488,9 @@ public class NameNode {
        LOG.warn("ServicePlugin " + p + " could not be started", t);
      }
    }
-    LOG.info(getRole() + " up at: " + rpcServer.getRpcAddress());
+    LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
    if (rpcServer.getServiceRpcAddress() != null) {
-      LOG.info(getRole() + " service server is up at: "
+      LOG.info(getRole() + " service RPC up at: "
          + rpcServer.getServiceRpcAddress());
    }
  }
@@ -617,7 +616,7 @@ public class NameNode {
   */
  public void join() {
    try {
-      this.rpcServer.join();
+      rpcServer.join();
    } catch (InterruptedException ie) {
      LOG.info("Caught interrupted exception ", ie);
    }
@@ -665,27 +664,31 @@ public class NameNode {
  }

  /**
-   * Returns the address on which the NameNodes is listening to.
-   * @return namenode rpc address
+   * @return NameNode RPC address
   */
  public InetSocketAddress getNameNodeAddress() {
    return rpcServer.getRpcAddress();
  }
-  
+
+  /**
+   * @return NameNode RPC address in "host:port" string form
+   */
+  public String getNameNodeAddressHostPortString() {
+    return NetUtils.getHostPortString(rpcServer.getRpcAddress());
+  }
+
  /**
-   * Returns namenode service rpc address, if set. Otherwise returns
-   * namenode rpc address.
-   * @return namenode service rpc address used by datanodes
+   * @return NameNode service RPC address if configured, the
+   *    NameNode RPC address otherwise
   */
  public InetSocketAddress getServiceRpcAddress() {
-    return rpcServer.getServiceRpcAddress() != null ? rpcServer.getServiceRpcAddress() : rpcServer.getRpcAddress();
+    final InetSocketAddress serviceAddr = rpcServer.getServiceRpcAddress();
+    return serviceAddr == null ? rpcServer.getRpcAddress() : serviceAddr;
  }

  /**
-   * Returns the address of the NameNodes http server, 
-   * which is used to access the name-node web UI.
-   * 
-   * @return the http address.
+   * @return NameNode HTTP address, used by the Web UI, image transfer,
+   *    and HTTP-based file system clients like Hftp and WebHDFS
   */
  public InetSocketAddress getHttpAddress() {
    return httpServer.getHttpAddress();
@@ -1171,10 +1174,12 @@ public class NameNode {
          NAMESERVICE_SPECIFIC_KEYS);
    }
    
+    // If the RPC address is set use it to (re-)configure the default FS
    if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
      URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
          + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
      conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+      LOG.info("Setting " + FS_DEFAULT_NAME_KEY + " to " + defaultUri.toString());
    }
  }
    
@@ -1196,8 +1201,9 @@ public class NameNode {
    try {
      StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
      NameNode namenode = createNameNode(argv, null);
-      if (namenode != null)
+      if (namenode != null) {
        namenode.join();
+      }
    } catch (Throwable e) {
      LOG.fatal("Exception in namenode join", e);
      terminate(1, e);
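
Editor's note: a sketch of the default-filesystem wiring the NameNode hunk above now logs: when the RPC address key is set, the default-filesystem key is rewritten to point at it. It reuses the DFSConfigKeys constants the patch itself references; the helper class name is illustrative and this is not the patch's own code.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

class DefaultFsSketch {
  // If dfs.namenode.rpc-address is configured, derive the default FS from it.
  static void setDefaultFsFromRpcAddress(Configuration conf) {
    String rpcAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
    if (rpcAddr != null) {
      conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,
          URI.create("hdfs://" + rpcAddr).toString());
    }
  }
}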

+ 0 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java

@@ -49,12 +49,9 @@ public class NameNodeHttpServer {
  private final Configuration conf;
  private final NameNode nn;
  
-  private final Log LOG = NameNode.LOG;
  private InetSocketAddress httpAddress;
-  
  private InetSocketAddress bindAddress;
  
-  
  public static final String NAMENODE_ADDRESS_ATTRIBUTE_KEY = "name.node.address";
  public static final String FSIMAGE_ATTRIBUTE_KEY = "name.system.image";
  protected static final String NAMENODE_ATTRIBUTE_KEY = "name.node";
@@ -68,12 +65,6 @@ public class NameNodeHttpServer {
    this.bindAddress = bindAddress;
  }
  
-  private String getDefaultServerPrincipal() throws IOException {
-    return SecurityUtil.getServerPrincipal(
-        conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
-        nn.getNameNodeAddress().getHostName());
-  }
-
  public void start() throws IOException {
    final String infoHost = bindAddress.getHostName();
    int infoPort = bindAddress.getPort();

+ 53 - 36
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -66,7 +66,6 @@ import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshAuthorizationPolicyProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
@@ -74,8 +73,6 @@ import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolPB;
@@ -119,6 +116,9 @@ import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.tools.GetUserMappingsProtocolPB;
+import org.apache.hadoop.tools.impl.pb.service.GetUserMappingsProtocolPBServiceImpl;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocol.GetUserMappingsProtocolService;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.util.VersionUtil;
 
@@ -159,10 +159,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
    int handlerCount = 
      conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY, 
                  DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
-    InetSocketAddress socAddr = nn.getRpcServerAddress(conf);
-		RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
-         ProtobufRpcEngine.class);
-     ClientNamenodeProtocolServerSideTranslatorPB 
+
+    RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
+        ProtobufRpcEngine.class);
+
+    ClientNamenodeProtocolServerSideTranslatorPB 
       clientProtocolServerTranslator = 
         new ClientNamenodeProtocolServerSideTranslatorPB(this);
     BlockingService clientNNPbService = ClientNamenodeProtocol.
@@ -188,8 +189,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
    BlockingService refreshUserMappingService = RefreshUserMappingsProtocolService
        .newReflectiveBlockingService(refreshUserMappingXlator);

-    GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator = 
-        new GetUserMappingsProtocolServerSideTranslatorPB(this);
+    GetUserMappingsProtocolPBServiceImpl getUserMappingXlator = 
+        new GetUserMappingsProtocolPBServiceImpl(this);
    BlockingService getUserMappingService = GetUserMappingsProtocolService
        .newReflectiveBlockingService(getUserMappingXlator);
    
@@ -199,22 +200,24 @@ class NameNodeRpcServer implements NamenodeProtocols {
        .newReflectiveBlockingService(haServiceProtocolXlator);
	  
    WritableRpcEngine.ensureInitialized();
-    
-    InetSocketAddress dnSocketAddr = nn.getServiceRpcServerAddress(conf);
-    if (dnSocketAddr != null) {
+
+    InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
+    if (serviceRpcAddr != null) {
      int serviceHandlerCount =
        conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
                    DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
-      // Add all the RPC protocols that the namenode implements
-      this.serviceRpcServer = new RPC.Builder(conf)
+      serviceRpcServer = new RPC.Builder(conf)
          .setProtocol(
              org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
          .setInstance(clientNNPbService)
-          .setBindAddress(dnSocketAddr.getHostName())
-          .setPort(dnSocketAddr.getPort()).setNumHandlers(serviceHandlerCount)
+          .setBindAddress(serviceRpcAddr.getHostName())
+          .setPort(serviceRpcAddr.getPort())
+          .setNumHandlers(serviceHandlerCount)
          .setVerbose(false)
          .setSecretManager(namesystem.getDelegationTokenSecretManager())
          .build();
+
+      // Add all the RPC protocols that the namenode implements
      DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService,
          serviceRpcServer);
      DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService,
@@ -228,20 +231,26 @@ class NameNodeRpcServer implements NamenodeProtocols {
      DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, 
          getUserMappingService, serviceRpcServer);
  
-      this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress();
+      serviceRPCAddress = serviceRpcServer.getListenerAddress();
      nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
    } else {
      serviceRpcServer = null;
      serviceRPCAddress = null;
    }
-    // Add all the RPC protocols that the namenode implements
-    this.clientRpcServer = new RPC.Builder(conf)
+
+    InetSocketAddress rpcAddr = nn.getRpcServerAddress(conf);
+    clientRpcServer = new RPC.Builder(conf)
        .setProtocol(
            org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
-        .setInstance(clientNNPbService).setBindAddress(socAddr.getHostName())
-        .setPort(socAddr.getPort()).setNumHandlers(handlerCount)
+        .setInstance(clientNNPbService)
+        .setBindAddress(rpcAddr.getHostName())
+        .setPort(rpcAddr.getPort())
+        .setNumHandlers(handlerCount)
        .setVerbose(false)
-        .setSecretManager(namesystem.getDelegationTokenSecretManager()).build();
+        .setSecretManager(namesystem.getDelegationTokenSecretManager())
+        .build();
+
+    // Add all the RPC protocols that the namenode implements
    DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService,
        clientRpcServer);
    DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService,
@@ -259,44 +268,51 @@ class NameNodeRpcServer implements NamenodeProtocols {
    if (serviceAuthEnabled =
          conf.getBoolean(
            CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
-      this.clientRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
-      if (this.serviceRpcServer != null) {
-        this.serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
+      clientRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
+      if (serviceRpcServer != null) {
+        serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
      }
    }

    // The rpc-server port can be ephemeral... ensure we have the correct info
-    this.clientRpcAddress = this.clientRpcServer.getListenerAddress(); 
+    clientRpcAddress = clientRpcServer.getListenerAddress();
    nn.setRpcServerAddress(conf, clientRpcAddress);
    
-    this.minimumDataNodeVersion = conf.get(
+    minimumDataNodeVersion = conf.get(
        DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY,
        DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT);

    // Set terse exception whose stack trace won't be logged
-    this.clientRpcServer.addTerseExceptions(SafeModeException.class);
+    clientRpcServer.addTerseExceptions(SafeModeException.class);
 }
  
  /**
-   * Actually start serving requests.
+   * Start client and service RPC servers.
   */
  void start() {
-    clientRpcServer.start();  //start RPC server
+    clientRpcServer.start();
    if (serviceRpcServer != null) {
      serviceRpcServer.start();      
    }
  }
  
  /**
-   * Wait until the RPC server has shut down.
+   * Wait until the client RPC server has shutdown.
   */
  void join() throws InterruptedException {
-    this.clientRpcServer.join();
+    clientRpcServer.join();
  }
-  
+
+  /**
+   * Stop client and service RPC servers.
+   */
  void stop() {
-    if(clientRpcServer != null) clientRpcServer.stop();
-    if(serviceRpcServer != null) serviceRpcServer.stop();
+    if (clientRpcServer != null) {
+      clientRpcServer.stop();
+    }
+    if (serviceRpcServer != null) {
+      serviceRpcServer.stop();
+    }
  }
  
  InetSocketAddress getServiceRpcAddress() {
@@ -333,8 +349,9 @@ class NameNodeRpcServer implements NamenodeProtocols {
    namesystem.checkOperation(OperationCategory.UNCHECKED);
    verifyRequest(registration);
    LOG.info("Error report from " + registration + ": " + msg);
-    if(errorCode == FATAL)
+    if (errorCode == FATAL) {
      namesystem.releaseBackupNode(registration);
+    }
  }

  @Override // NamenodeProtocol

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java

@@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -444,7 +443,7 @@ class NamenodeJspHelper {
      nodeToRedirect = nn.getHttpAddress().getHostName();
      redirectPort = nn.getHttpAddress().getPort();
    }
-    String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
+    String addr = nn.getNameNodeAddressHostPortString();
    String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
    redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":" + redirectPort
        + "/browseDirectory.jsp?namenodeInfoPort="
@@ -615,8 +614,9 @@ class NamenodeJspHelper {
      final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
      dm.fetchDatanodes(live, dead, true);

-      InetSocketAddress nnSocketAddress = (InetSocketAddress) context
-          .getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
+      InetSocketAddress nnSocketAddress =
+          (InetSocketAddress)context.getAttribute(
+              NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
      String nnaddr = nnSocketAddress.getAddress().getHostAddress() + ":"
          + nnSocketAddress.getPort();


+ 24 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -965,6 +965,30 @@
  <value>${dfs.web.authentication.kerberos.principal}</value>
</property>

+<property>
+  <name>dfs.namenode.check.stale.datanode</name>
+  <value>false</value>
+  <description>
+    Indicates whether to check for "stale" datanodes, i.e., datanodes
+    whose heartbeat messages have not been received by the namenode
+    for more than the configured time interval. If this parameter is
+    set to true, stale datanodes are moved to the end of the target
+    node list returned for reads, and writes will also try to avoid
+    stale nodes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.stale.datanode.interval</name>
+  <value>30000</value>
+  <description>
+    The time interval for marking a datanode as "stale": if the
+    namenode has not received a heartbeat message from a datanode for
+    more than this interval, the datanode is marked and treated as
+    "stale" by default.
+  </description>
+</property>
+
<property>
  <name>dfs.namenode.invalidate.work.pct.per.iteration</name>
  <value>0.32f</value>
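
Editor's note: the two new properties above can also be set programmatically, for example in a test or client Configuration. A minimal sketch using the property names introduced in this hdfs-default.xml hunk; the 45-second interval is just an example value (the default shown above is 30000 ms).

import org.apache.hadoop.conf.Configuration;

class StaleDatanodeConfigSketch {
  static Configuration withStaleNodeCheck() {
    Configuration conf = new Configuration();
    // Enable the stale-datanode check and use a 45 s staleness interval.
    conf.setBoolean("dfs.namenode.check.stale.datanode", true);
    conf.setLong("dfs.namenode.stale.datanode.interval", 45 * 1000L);
    return conf;
  }
}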

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp

@@ -34,8 +34,7 @@
  HAServiceState nnHAState = nn.getServiceState();
  boolean isActive = (nnHAState == HAServiceState.ACTIVE);
  String namenodeRole = nn.getRole().toString();
-  String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":"
-      + nn.getNameNodeAddress().getPort();
+  String namenodeLabel = nn.getNameNodeAddressHostPortString();
  Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks = 
	fsn.listCorruptFileBlocks("/", null);
  int corruptFileCount = corruptFileBlocks.size();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp

@@ -34,7 +34,7 @@
  boolean isActive = (nnHAState == HAServiceState.ACTIVE);
  String namenodeRole = nn.getRole().toString();
  String namenodeState = nnHAState.toString();
-  String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
+  String namenodeLabel = nn.getNameNodeAddressHostPortString();
%>

<!DOCTYPE html>

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp

@@ -33,7 +33,7 @@ String namenodeRole = nn.getRole().toString();
FSNamesystem fsn = nn.getNamesystem();
HAServiceState nnHAState = nn.getServiceState();
boolean isActive = (nnHAState == HAServiceState.ACTIVE);
-String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
+String namenodeLabel = nn.getNameNodeAddressHostPortString();
%>

<!DOCTYPE html>

+ 27 - 19
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -274,7 +274,7 @@ public class DFSTestUtil {
   * specified target.
   */
  public void waitReplication(FileSystem fs, String topdir, short value) 
-                                              throws IOException {
+      throws IOException, InterruptedException, TimeoutException {
    Path root = new Path(topdir);

    /** wait for the replication factor to settle down */
@@ -499,36 +499,44 @@ public class DFSTestUtil {
      return fileNames;
    }
  }
-  
-  /** wait for the file's replication to be done */
-  public static void waitReplication(FileSystem fs, Path fileName, 
-      short replFactor)  throws IOException {
-    boolean good;
+
+  /**
+   * Wait for the given file to reach the given replication factor.
+   * @throws TimeoutException if we fail to sufficiently replicate the file
+   */
+  public static void waitReplication(FileSystem fs, Path fileName, short replFactor)
+      throws IOException, InterruptedException, TimeoutException {
+    boolean correctReplFactor;
+    final int ATTEMPTS = 20;
+    int count = 0;
+
    do {
-      good = true;
+      correctReplFactor = true;
      BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
+      count++;
      for (int j = 0; j < locs.length; j++) {
        String[] hostnames = locs[j].getNames();
        if (hostnames.length != replFactor) {
-          String hostNameList = "";
-          for (String h : hostnames) hostNameList += h + " ";
-          System.out.println("Block " + j + " of file " + fileName 
-              + " has replication factor " + hostnames.length + "; locations "
-              + hostNameList);
-          good = false;
-          try {
-            System.out.println("Waiting for replication factor to drain");
-            Thread.sleep(100);
-          } catch (InterruptedException e) {} 
+          correctReplFactor = false;
+          System.out.println("Block " + j + " of file " + fileName
+              + " has replication factor " + hostnames.length
+              + " (desired " + replFactor + "); locations "
+              + Joiner.on(' ').join(hostnames));
+          Thread.sleep(1000);
          break;
        }
      }
-      if (good) {
+      if (correctReplFactor) {
        System.out.println("All blocks of file " + fileName
            + " verified to have replication factor " + replFactor);
      }
-    } while(!good);
+    } while (!correctReplFactor && count < ATTEMPTS);
+
+    if (count == ATTEMPTS) {
+      throw new TimeoutException("Timed out waiting for " + fileName +
+          " to reach " + replFactor + " replicas");
+    }
  }
  
  /** delete directory and everything underneath it.*/
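
Editor's note: a hedged usage sketch for the tightened waitReplication contract above. Callers now have to handle InterruptedException and TimeoutException (or simply declare throws Exception, as the updated tests do). The method name and signature come from the hunk; everything else is illustrative and assumes the hadoop-hdfs test classes are on the classpath.

import java.io.IOException;
import java.util.concurrent.TimeoutException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;

class WaitReplicationUsageSketch {
  // Blocks until the file reaches 3 replicas, or fails after the bounded
  // number of attempts instead of spinning forever as the old code did.
  static void waitForThreeReplicas(FileSystem fs, Path file)
      throws IOException, InterruptedException, TimeoutException {
    DFSTestUtil.waitReplication(fs, file, (short) 3);
  }
}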

+ 10 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -857,8 +857,8 @@ public class MiniDFSCluster {
    // After the NN has started, set back the bound ports into
    // the conf
    conf.set(DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId), NetUtils
-        .getHostPortString(nn.getNameNodeAddress()));
+        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId),
+        nn.getNameNodeAddressHostPortString());
    conf.set(DFSUtil.addKeySuffixes(
        DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId, nnId), NetUtils
        .getHostPortString(nn.getHttpAddress()));
@@ -880,8 +880,8 @@ public class MiniDFSCluster {
   * @return URI of the given namenode in MiniDFSCluster
   */
  public URI getURI(int nnIndex) {
-    InetSocketAddress addr = nameNodes[nnIndex].nameNode.getNameNodeAddress();
-    String hostPort = NetUtils.getHostPortString(addr);
+    String hostPort =
+        nameNodes[nnIndex].nameNode.getNameNodeAddressHostPortString();
    URI uri = null;
    try {
      uri = new URI("hdfs://" + hostPort);
@@ -918,7 +918,8 @@ public class MiniDFSCluster {
  /**
   * wait for the cluster to get out of safemode.
   */
-  public void waitClusterUp() {
+  public void waitClusterUp() throws IOException {
+    int i = 0;
    if (numDataNodes > 0) {
      while (!isClusterUp()) {
        try {
@@ -926,6 +927,9 @@ public class MiniDFSCluster {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
        }
+        if (++i > 10) {
+          throw new IOException("Timed out waiting for Mini HDFS Cluster to start");
+        }
      }
    }
  }
@@ -1354,6 +1358,7 @@ public class MiniDFSCluster {
      if (ExitUtil.terminateCalled()) {
        LOG.fatal("Test resulted in an unexpected exit",
            ExitUtil.getFirstExitException());
+        ExitUtil.resetFirstExitException();
        throw new AssertionError("Test resulted in an unexpected exit");
      }
    }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java

@@ -61,7 +61,7 @@ public class TestBlockReaderLocal {
   * of this class might immediately issue a retry on failure, so it's polite.
   */
  @Test
-  public void testStablePositionAfterCorruptRead() throws IOException {
+  public void testStablePositionAfterCorruptRead() throws Exception {
    final short REPL_FACTOR = 1;
    final long FILE_LENGTH = 512L;
    cluster.waitActive();

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java

@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.io.RandomAccessFile;
 import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -199,11 +200,11 @@ public class TestClientReportBadBlock {
  }

  /**
-   * create a file with one block and corrupt some/all of the block replicas.
+   * Create a file with one block and corrupt some/all of the block replicas.
   */
  private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
      int corruptBlockCount) throws IOException, AccessControlException,
-      FileNotFoundException, UnresolvedLinkException {
+      FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
    DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
    DFSTestUtil.waitReplication(dfs, filePath, repl);
    // Locate the file blocks by asking name node

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -789,8 +789,7 @@ public class TestDFSClientRetries {
   * way. See HDFS-3067.
   */
  @Test
-  public void testRetryOnChecksumFailure()
-      throws UnresolvedLinkException, IOException {
+  public void testRetryOnChecksumFailure() throws Exception {
    HdfsConfiguration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
@@ -831,7 +830,7 @@ public class TestDFSClientRetries {
  }

  /** Test client retry with namenode restarting. */
-  @Test
+  @Test(timeout=300000)
  public void testNamenodeRestart() throws Exception {
    namenodeRestartTest(new Configuration(), false);
  }

+ 16 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

@@ -34,14 +34,19 @@ import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.Level;
 import org.junit.Test;
 
 /**
@@ -59,6 +64,10 @@ public class TestDatanodeBlockScanner {
  
  private static Pattern pattern_blockVerify = 
             Pattern.compile(".*?(SCAN_PERIOD)\\s*:\\s*(\\d+.*?)");
+  
+  static {
+    ((Log4JLogger)FSNamesystem.auditLog).getLogger().setLevel(Level.WARN);
+  }
  /**
   * This connects to datanode and fetches block verification data.
   * It repeats this until the given block has a verification time > newTime.
@@ -173,7 +182,7 @@ public class TestDatanodeBlockScanner {
  }

  @Test
-  public void testBlockCorruptionPolicy() throws IOException {
+  public void testBlockCorruptionPolicy() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
    Random random = new Random();
@@ -206,12 +215,12 @@ public class TestDatanodeBlockScanner {
    assertTrue(MiniDFSCluster.corruptReplica(1, block));
    assertTrue(MiniDFSCluster.corruptReplica(2, block));

-    // Read the file to trigger reportBadBlocks by client
-    try {
-      IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), 
-                        conf, true);
-    } catch (IOException e) {
-      // Ignore exception
+    // Trigger each of the DNs to scan this block immediately.
+    // The block pool scanner doesn't run frequently enough on its own
+    // to notice these, and due to HDFS-1371, the client won't report
+    // bad blocks to the NN when all replicas are bad.
+    for (DataNode dn : cluster.getDataNodes()) {
+      DataNodeTestUtils.runBlockScannerForBlock(dn, block);
    }

    // We now have the blocks to be marked as corrupt and we get back all

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java

@@ -25,6 +25,7 @@ import static org.junit.Assert.fail;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -100,7 +101,7 @@ public class TestFileStatus {
  }
  
  private void checkFile(FileSystem fileSys, Path name, int repl)
-      throws IOException {
+      throws IOException, InterruptedException, TimeoutException {
    DFSTestUtil.waitReplication(fileSys, name, (short) repl);
  }
  
@@ -129,7 +130,7 @@
 
  /** Test the FileStatus obtained calling getFileStatus on a file */  
  @Test
-  public void testGetFileStatusOnFile() throws IOException {
+  public void testGetFileStatusOnFile() throws Exception {
    checkFile(fs, file1, 1);
    // test getFileStatus on a file
    FileStatus status = fs.getFileStatus(file1);

+ 164 - 41
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java

@@ -17,8 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -28,48 +27,178 @@ import java.util.Map;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.util.Time;
 import org.junit.Test;
+
 /**
- * This class tests if block replacement request to data nodes work correctly.
+ * This class tests if the getBlocks request works correctly.
  */
 public class TestGetBlocks {
+  private static final int blockSize = 8192;
+  private static final String racks[] = new String[] { "/d1/r1", "/d1/r1",
+      "/d1/r2", "/d1/r2", "/d1/r2", "/d2/r3", "/d2/r3" };
+  private static final int numDatanodes = racks.length;
+
+  /**
+   * Stop the heartbeat of a datanode in the MiniDFSCluster
+   * 
+   * @param cluster
+   *          The MiniDFSCluster
+   * @param hostName
+   *          The hostName of the datanode to be stopped
+   * @return The DataNode whose heartbeat has been stopped
+   */
+  private DataNode stopDataNodeHeartbeat(MiniDFSCluster cluster, String hostName) {
+    for (DataNode dn : cluster.getDataNodes()) {
+      if (dn.getDatanodeId().getHostName().equals(hostName)) {
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+        return dn;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Test if the datanodes returned by
+   * {@link ClientProtocol#getBlockLocations(String, long, long)} are correct
+   * when stale node checking is enabled. Also test the scenario where 1)
+   * stale node checking is enabled, 2) a write is in progress, and 3) a
+   * datanode becomes stale, all at the same time.
+   * 
+   * @throws Exception
+   */
+  @Test
+  public void testReadSelectNonStaleDatanode() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
+    long staleInterval = 30 * 1000 * 60;
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
+        staleInterval);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numDatanodes).racks(racks).build();
+
+    cluster.waitActive();
+    InetSocketAddress addr = new InetSocketAddress("localhost",
+        cluster.getNameNodePort());
+    DFSClient client = new DFSClient(addr, conf);
+    List<DatanodeDescriptor> nodeInfoList = cluster.getNameNode()
+        .getNamesystem().getBlockManager().getDatanodeManager()
+        .getDatanodeListForReport(DatanodeReportType.LIVE);
+    assertEquals("Unexpected number of datanodes", numDatanodes,
+        nodeInfoList.size());
+    FileSystem fileSys = cluster.getFileSystem();
+    FSDataOutputStream stm = null;
+    try {
+      // do the writing but do not close the FSDataOutputStream
+      // in order to mimic the ongoing writing
+      final Path fileName = new Path("/file1");
+      stm = fileSys.create(
+          fileName,
+          true,
+          fileSys.getConf().getInt(
+              CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+          (short) 3, blockSize);
+      stm.write(new byte[(blockSize * 3) / 2]);
+      // We do not close the stream so that
+      // the writing seems to be still ongoing
+      stm.hflush();
+
+      LocatedBlocks blocks = client.getNamenode().getBlockLocations(
+          fileName.toString(), 0, blockSize);
+      DatanodeInfo[] nodes = blocks.get(0).getLocations();
+      assertEquals(nodes.length, 3);
+      DataNode staleNode = null;
+      DatanodeDescriptor staleNodeInfo = null;
+      // stop the heartbeat of the first node
+      staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName());
+      assertNotNull(staleNode);
+      // set the first node as stale
+      staleNodeInfo = cluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager()
+          .getDatanode(staleNode.getDatanodeId());
+      staleNodeInfo.setLastUpdate(Time.now() - staleInterval - 1);
+
+      LocatedBlocks blocksAfterStale = client.getNamenode().getBlockLocations(
+          fileName.toString(), 0, blockSize);
+      DatanodeInfo[] nodesAfterStale = blocksAfterStale.get(0).getLocations();
+      assertEquals(nodesAfterStale.length, 3);
+      assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName());
+
+      // restart the staleNode's heartbeat
+      DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode, false);
+      // reset the first node as non-stale, so as to avoid two stale nodes
+      staleNodeInfo.setLastUpdate(Time.now());
+
+      LocatedBlock lastBlock = client.getLocatedBlocks(fileName.toString(), 0,
+          Long.MAX_VALUE).getLastLocatedBlock();
+      nodes = lastBlock.getLocations();
+      assertEquals(nodes.length, 3);
+      // stop the heartbeat of the first node for the last block
+      staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName());
+      assertNotNull(staleNode);
+      // set the node as stale
+      cluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager()
+          .getDatanode(staleNode.getDatanodeId())
+          .setLastUpdate(Time.now() - staleInterval - 1);
+
+      LocatedBlock lastBlockAfterStale = client.getLocatedBlocks(
+          fileName.toString(), 0, Long.MAX_VALUE).getLastLocatedBlock();
+      nodesAfterStale = lastBlockAfterStale.getLocations();
+      assertEquals(nodesAfterStale.length, 3);
+      assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName());
+    } finally {
+      if (stm != null) {
+        stm.close();
+      }
+      cluster.shutdown();
+    }
+  }
+
   /** test getBlocks */
   @Test
   public void testGetBlocks() throws Exception {
     final Configuration CONF = new HdfsConfiguration();
 
-    final short REPLICATION_FACTOR = (short)2;
+    final short REPLICATION_FACTOR = (short) 2;
     final int DEFAULT_BLOCK_SIZE = 1024;
     final Random r = new Random();
-    
+
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
-                                               .numDataNodes(REPLICATION_FACTOR)
-                                               .build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
+        REPLICATION_FACTOR).build();
     try {
       cluster.waitActive();
-      
+
       // create a file with two blocks
       FileSystem fs = cluster.getFileSystem();
       FSDataOutputStream out = fs.create(new Path("/tmp.txt"),
           REPLICATION_FACTOR);
-      byte [] data = new byte[1024];
-      long fileLen = 2*DEFAULT_BLOCK_SIZE;
+      byte[] data = new byte[1024];
+      long fileLen = 2 * DEFAULT_BLOCK_SIZE;
       long bytesToWrite = fileLen;
-      while( bytesToWrite > 0 ) {
+      while (bytesToWrite > 0) {
         r.nextBytes(data);
-        int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite;
+        int bytesToWriteNext = (1024 < bytesToWrite) ? 1024
+            : (int) bytesToWrite;
         out.write(data, 0, bytesToWriteNext);
         bytesToWrite -= bytesToWriteNext;
       }
@@ -77,27 +206,28 @@ public class TestGetBlocks {
 
       // get blocks & data nodes
       List<LocatedBlock> locatedBlocks;
-      DatanodeInfo[] dataNodes=null;
+      DatanodeInfo[] dataNodes = null;
       boolean notWritten;
       do {
-        final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);
-        locatedBlocks = dfsclient.getNamenode().
-          getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
+        final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF),
+            CONF);
+        locatedBlocks = dfsclient.getNamenode()
+            .getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
         assertEquals(2, locatedBlocks.size());
         notWritten = false;
-        for(int i=0; i<2; i++) {
+        for (int i = 0; i < 2; i++) {
           dataNodes = locatedBlocks.get(i).getLocations();
-          if(dataNodes.length != REPLICATION_FACTOR) {
+          if (dataNodes.length != REPLICATION_FACTOR) {
             notWritten = true;
             try {
               Thread.sleep(10);
-            } catch(InterruptedException e) {
+            } catch (InterruptedException e) {
             }
             break;
           }
         }
-      } while(notWritten);
-      
+      } while (notWritten);
+
       // get RPC client to namenode
       InetSocketAddress addr = new InetSocketAddress("localhost",
           cluster.getNameNodePort());
@@ -122,7 +252,7 @@ public class TestGetBlocks {
       assertEquals(locs[0].getStorageIDs().length, 2);
 
       // get blocks of size 0 from dataNodes[0]
-      getBlocksWithException(namenode, dataNodes[0], 0);     
+      getBlocksWithException(namenode, dataNodes[0], 0);
 
       // get blocks of size -1 from dataNodes[0]
       getBlocksWithException(namenode, dataNodes[0], -1);
@@ -136,46 +266,39 @@ public class TestGetBlocks {
   }
 
   private void getBlocksWithException(NamenodeProtocol namenode,
-                                      DatanodeInfo datanode,
-                                      long size) throws IOException {
+      DatanodeInfo datanode, long size) throws IOException {
     boolean getException = false;
     try {
-        namenode.getBlocks(DFSTestUtil.getLocalDatanodeInfo(), 2);
-    } catch(RemoteException e) {
+      namenode.getBlocks(DFSTestUtil.getLocalDatanodeInfo(), 2);
+    } catch (RemoteException e) {
       getException = true;
       assertTrue(e.getClassName().contains("HadoopIllegalArgumentException"));
     }
     assertTrue(getException);
   }
- 
+
   @Test
   public void testBlockKey() {
     Map<Block, Long> map = new HashMap<Block, Long>();
     final Random RAN = new Random();
     final long seed = RAN.nextLong();
-    System.out.println("seed=" +  seed);
+    System.out.println("seed=" + seed);
     RAN.setSeed(seed);
 
-    long[] blkids = new long[10]; 
-    for(int i = 0; i < blkids.length; i++) {
+    long[] blkids = new long[10];
+    for (int i = 0; i < blkids.length; i++) {
       blkids[i] = 1000L + RAN.nextInt(100000);
       map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]);
     }
     System.out.println("map=" + map.toString().replace(",", "\n  "));
-    
-    for(int i = 0; i < blkids.length; i++) {
-      Block b = new Block(blkids[i], 0, GenerationStamp.GRANDFATHER_GENERATION_STAMP);
+
+    for (int i = 0; i < blkids.length; i++) {
+      Block b = new Block(blkids[i], 0,
+          GenerationStamp.GRANDFATHER_GENERATION_STAMP);
       Long v = map.get(b);
       System.out.println(b + " => " + v);
       assertEquals(blkids[i], v.longValue());
     }
   }
 
-  /**
-   * @param args
-   */
-  public static void main(String[] args) throws Exception {
-    (new TestGetBlocks()).testGetBlocks();
-  }
-
 }
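The testReadSelectNonStaleDatanode change above hinges on two behaviours: a datanode is treated as stale once its last heartbeat is older than the configured stale interval, and stale replicas are moved to the end of the location list handed to readers. A minimal, self-contained sketch of that ordering rule is shown below (plain Java; the Replica class, STALE_INTERVAL_MS constant and orderForRead method are illustrative names, not the Hadoop classes used in the patch):

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Illustrative only: shows the "stale replicas last" ordering the test expects.
// Replica, STALE_INTERVAL_MS and orderForRead are hypothetical names.
public class StaleOrderingSketch {
  static final long STALE_INTERVAL_MS = 30L * 60 * 1000; // 30 minutes, as in the test

  static class Replica {
    final String host;
    final long lastHeartbeatMs;
    Replica(String host, long lastHeartbeatMs) {
      this.host = host;
      this.lastHeartbeatMs = lastHeartbeatMs;
    }
    boolean isStale(long nowMs) {
      return nowMs - lastHeartbeatMs > STALE_INTERVAL_MS;
    }
  }

  // Stable sort: non-stale replicas keep their order and come first, stale
  // replicas move to the tail, mirroring the assertion that the stale node
  // ends up at nodesAfterStale[2].
  static List<Replica> orderForRead(List<Replica> replicas, long nowMs) {
    List<Replica> sorted = new ArrayList<>(replicas);
    sorted.sort(Comparator.comparing((Replica r) -> r.isStale(nowMs)));
    return sorted;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    List<Replica> locs = List.of(
        new Replica("dn1", now - STALE_INTERVAL_MS - 1), // stale
        new Replica("dn2", now),
        new Replica("dn3", now));
    orderForRead(locs, now).forEach(r -> System.out.println(r.host));
    // prints dn2, dn3, dn1 -- the stale node is last
  }
}
```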

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java

@@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
@@ -41,6 +40,7 @@ import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.tools.impl.pb.client.GetUserMappingsProtocolPBClientImpl;
 import org.junit.AfterClass;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.Test;
@@ -141,8 +141,8 @@ public class TestIsMethodSupported {
   
   
   @Test
   @Test
   public void testGetUserMappingsProtocol() throws IOException {
   public void testGetUserMappingsProtocol() throws IOException {
-    GetUserMappingsProtocolClientSideTranslatorPB translator = 
-        (GetUserMappingsProtocolClientSideTranslatorPB)
+    GetUserMappingsProtocolPBClientImpl translator = 
+        (GetUserMappingsProtocolPBClientImpl)
         NameNodeProxies.createNonHAProxy(conf, nnAddress,
         NameNodeProxies.createNonHAProxy(conf, nnAddress,
             GetUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(),
             GetUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(),
             true).getProxy();
             true).getProxy();

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java

@@ -27,6 +27,7 @@ import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 import java.util.Iterator;
 import java.util.Iterator;
 import java.util.Random;
 import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
@@ -420,8 +421,8 @@ public class TestReplication {
     }
     }
   }
   }
   
   
-  private void changeBlockLen(MiniDFSCluster cluster, 
-      int lenDelta) throws IOException, InterruptedException {
+  private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
+      throws IOException, InterruptedException, TimeoutException {
     final Path fileName = new Path("/file1");
     final Path fileName = new Path("/file1");
     final short REPLICATION_FACTOR = (short)1;
     final short REPLICATION_FACTOR = (short)1;
     final FileSystem fs = cluster.getFileSystem();
     final FileSystem fs = cluster.getFileSystem();

+ 25 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java

@@ -224,7 +224,8 @@ public class TestShortCircuitLocalRead {
   @Test
   @Test
   public void testGetBlockLocalPathInfo() throws IOException, InterruptedException {
   public void testGetBlockLocalPathInfo() throws IOException, InterruptedException {
     final Configuration conf = new Configuration();
     final Configuration conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, "alloweduser");
+    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
+        "alloweduser1,alloweduser2");
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
         .format(true).build();
     cluster.waitActive();
     cluster.waitActive();
@@ -232,8 +233,10 @@ public class TestShortCircuitLocalRead {
     FileSystem fs = cluster.getFileSystem();
     FileSystem fs = cluster.getFileSystem();
     try {
     try {
       DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
       DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
-      UserGroupInformation aUgi = UserGroupInformation
-          .createRemoteUser("alloweduser");
+      UserGroupInformation aUgi1 =
+          UserGroupInformation.createRemoteUser("alloweduser1");
+      UserGroupInformation aUgi2 =
+          UserGroupInformation.createRemoteUser("alloweduser2");
       LocatedBlocks lb = cluster.getNameNode().getRpcServer()
       LocatedBlocks lb = cluster.getNameNode().getRpcServer()
           .getBlockLocations("/tmp/x", 0, 16);
           .getBlockLocations("/tmp/x", 0, 16);
       // Create a new block object, because the block inside LocatedBlock at
       // Create a new block object, because the block inside LocatedBlock at
@@ -241,7 +244,7 @@ public class TestShortCircuitLocalRead {
       ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
       ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
       Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
       Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
       final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
       final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
-      ClientDatanodeProtocol proxy = aUgi
+      ClientDatanodeProtocol proxy = aUgi1
           .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
           .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
             @Override
             @Override
             public ClientDatanodeProtocol run() throws Exception {
             public ClientDatanodeProtocol run() throws Exception {
@@ -250,13 +253,29 @@ public class TestShortCircuitLocalRead {
             }
             }
           });
           });
       
       
-      //This should succeed
+      // This should succeed
       BlockLocalPathInfo blpi = proxy.getBlockLocalPathInfo(blk, token);
       BlockLocalPathInfo blpi = proxy.getBlockLocalPathInfo(blk, token);
       Assert.assertEquals(
       Assert.assertEquals(
           DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(),
           DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(),
           blpi.getBlockPath());
           blpi.getBlockPath());
 
 
-      // Now try with a not allowed user.
+      // Try with the other allowed user
+      proxy = aUgi2
+          .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
+            @Override
+            public ClientDatanodeProtocol run() throws Exception {
+              return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf,
+                  60000, false);
+            }
+          });
+
+      // This should succeed as well
+      blpi = proxy.getBlockLocalPathInfo(blk, token);
+      Assert.assertEquals(
+          DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(),
+          blpi.getBlockPath());
+
+      // Now try with a disallowed user
       UserGroupInformation bUgi = UserGroupInformation
       UserGroupInformation bUgi = UserGroupInformation
           .createRemoteUser("notalloweduser");
           .createRemoteUser("notalloweduser");
       proxy = bUgi
       proxy = bUgi

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -88,7 +88,7 @@ public class TestBalancer {
   /* create a file with a length of <code>fileLen</code> */
   /* create a file with a length of <code>fileLen</code> */
   static void createFile(MiniDFSCluster cluster, Path filePath, long fileLen,
   static void createFile(MiniDFSCluster cluster, Path filePath, long fileLen,
       short replicationFactor, int nnIndex)
       short replicationFactor, int nnIndex)
-  throws IOException {
+  throws IOException, InterruptedException, TimeoutException {
     FileSystem fs = cluster.getFileSystem(nnIndex);
     FileSystem fs = cluster.getFileSystem(nnIndex);
     DFSTestUtil.createFile(fs, filePath, fileLen, 
     DFSTestUtil.createFile(fs, filePath, fileLen, 
         replicationFactor, r.nextLong());
         replicationFactor, r.nextLong());
@@ -100,7 +100,7 @@ public class TestBalancer {
    * whose used space to be <code>size</code>
    * whose used space to be <code>size</code>
    */
    */
   private ExtendedBlock[] generateBlocks(Configuration conf, long size,
   private ExtendedBlock[] generateBlocks(Configuration conf, long size,
-      short numNodes) throws IOException {
+      short numNodes) throws IOException, InterruptedException, TimeoutException {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
     try {
     try {
       cluster.waitActive();
       cluster.waitActive();

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java

@@ -23,6 +23,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collection;
 import java.util.List;
 import java.util.List;
 import java.util.Random;
 import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
@@ -96,7 +97,7 @@ public class TestBalancerWithMultipleNameNodes {
 
 
   /* create a file with a length of <code>fileLen</code> */
   /* create a file with a length of <code>fileLen</code> */
   private static void createFile(Suite s, int index, long len
   private static void createFile(Suite s, int index, long len
-      ) throws IOException {
+      ) throws IOException, InterruptedException, TimeoutException {
     final FileSystem fs = s.cluster.getFileSystem(index);
     final FileSystem fs = s.cluster.getFileSystem(index);
     DFSTestUtil.createFile(fs, FILE_PATH, len, s.replication, RANDOM.nextLong());
     DFSTestUtil.createFile(fs, FILE_PATH, len, s.replication, RANDOM.nextLong());
     DFSTestUtil.waitReplication(fs, FILE_PATH, s.replication);
     DFSTestUtil.waitReplication(fs, FILE_PATH, s.replication);
@@ -106,7 +107,7 @@ public class TestBalancerWithMultipleNameNodes {
    * whose used space to be <code>size</code>
    * whose used space to be <code>size</code>
    */
    */
   private static ExtendedBlock[][] generateBlocks(Suite s, long size
   private static ExtendedBlock[][] generateBlocks(Suite s, long size
-      ) throws IOException {
+      ) throws IOException, InterruptedException, TimeoutException {
     final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
     final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
     for(int n = 0; n < s.clients.length; n++) {
     for(int n = 0; n < s.clients.length; n++) {
       final long fileLen = size/s.replication;
       final long fileLen = size/s.replication;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java

@@ -53,7 +53,7 @@ public class TestOverReplicatedBlocks {
    * corrupt ones.
    * corrupt ones.
    */
    */
   @Test
   @Test
-  public void testProcesOverReplicateBlock() throws IOException {
+  public void testProcesOverReplicateBlock() throws Exception {
     Configuration conf = new HdfsConfiguration();
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(
     conf.set(
@@ -141,7 +141,7 @@ public class TestOverReplicatedBlocks {
    * send heartbeats. 
    * send heartbeats. 
    */
    */
   @Test
   @Test
-  public void testChooseReplicaToDelete() throws IOException {
+  public void testChooseReplicaToDelete() throws Exception {
     MiniDFSCluster cluster = null;
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
     FileSystem fs = null;
     try {
     try {

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java

@@ -114,6 +114,12 @@ public class DataNodeTestUtils {
         dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
         dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
   }
   }
   
   
+  public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
+    DataBlockScanner scanner = dn.getBlockScanner();
+    BlockPoolSliceScanner bpScanner = scanner.getBPScanner(b.getBlockPoolId());
+    bpScanner.verifyBlock(b);
+  }
+  
   public static void shutdownBlockScanner(DataNode dn) {
   public static void shutdownBlockScanner(DataNode dn) {
     if (dn.blockScanner != null) {
     if (dn.blockScanner != null) {
       dn.blockScanner.shutdown();
       dn.blockScanner.shutdown();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java

@@ -89,7 +89,7 @@ public class TestBlockReplacement {
   }
   }
   
   
   @Test
   @Test
-  public void testBlockReplacement() throws IOException, TimeoutException {
+  public void testBlockReplacement() throws Exception {
     final Configuration CONF = new HdfsConfiguration();
     final Configuration CONF = new HdfsConfiguration();
     final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
     final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
     final String[] NEW_RACKS = {"/RACK2"};
     final String[] NEW_RACKS = {"/RACK2"};

+ 16 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java

@@ -27,6 +27,9 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.List;
 import java.util.Random;
 import java.util.Random;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeoutException;
+
+import junit.framework.Assert;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
@@ -65,7 +68,7 @@ import org.mockito.invocation.InvocationOnMock;
 
 
 /**
 /**
  * This test simulates a variety of situations when blocks are being
  * This test simulates a variety of situations when blocks are being
- * intentionally orrupted, unexpectedly modified, and so on before a block
+ * intentionally corrupted, unexpectedly modified, and so on before a block
  * report is happening
  * report is happening
  */
  */
 public class TestBlockReport {
 public class TestBlockReport {
@@ -316,7 +319,7 @@ public class TestBlockReport {
    * @throws IOException in case of an error
    * @throws IOException in case of an error
    */
    */
   @Test
   @Test
-  public void blockReport_06() throws IOException {
+  public void blockReport_06() throws Exception {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     final int DN_N1 = DN_N0 + 1;
     final int DN_N1 = DN_N0 + 1;
@@ -353,7 +356,7 @@ public class TestBlockReport {
   @Test
   @Test
   // Currently this test is failing as expected 'cause the correct behavior is
   // Currently this test is failing as expected 'cause the correct behavior is
   // not yet implemented (9/15/09)
   // not yet implemented (9/15/09)
-  public void blockReport_07() throws IOException {
+  public void blockReport_07() throws Exception {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     final int DN_N1 = DN_N0 + 1;
     final int DN_N1 = DN_N0 + 1;
@@ -670,21 +673,24 @@ public class TestBlockReport {
   }
   }
 
 
   private void startDNandWait(Path filePath, boolean waitReplicas) 
   private void startDNandWait(Path filePath, boolean waitReplicas) 
-    throws IOException {
-    if(LOG.isDebugEnabled()) {
+      throws IOException, InterruptedException, TimeoutException {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Before next DN start: " + cluster.getDataNodes().size());
       LOG.debug("Before next DN start: " + cluster.getDataNodes().size());
     }
     }
     cluster.startDataNodes(conf, 1, true, null, null);
     cluster.startDataNodes(conf, 1, true, null, null);
+    cluster.waitClusterUp();
     ArrayList<DataNode> datanodes = cluster.getDataNodes();
     ArrayList<DataNode> datanodes = cluster.getDataNodes();
     assertEquals(datanodes.size(), 2);
     assertEquals(datanodes.size(), 2);
 
 
-    if(LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       int lastDn = datanodes.size() - 1;
       int lastDn = datanodes.size() - 1;
       LOG.debug("New datanode "
       LOG.debug("New datanode "
           + cluster.getDataNodes().get(lastDn).getDisplayName() 
           + cluster.getDataNodes().get(lastDn).getDisplayName() 
           + " has been started");
           + " has been started");
     }
     }
-    if (waitReplicas) DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
+    if (waitReplicas) {
+      DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
+    }
   }
   }
 
 
   private ArrayList<Block> prepareForRide(final Path filePath,
   private ArrayList<Block> prepareForRide(final Path filePath,
@@ -836,8 +842,9 @@ public class TestBlockReport {
     public void run() {
     public void run() {
       try {
       try {
         startDNandWait(filePath, true);
         startDNandWait(filePath, true);
-      } catch (IOException e) {
-        LOG.warn("Shouldn't happen", e);
+      } catch (Exception e) {
+        e.printStackTrace();
+        Assert.fail("Failed to start BlockChecker: " + e);
       }
       }
     }
     }
   }
   }
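Several of the test changes in this patch (startDNandWait, waitClusterUp, DFSTestUtil.waitReplication) now declare TimeoutException instead of waiting indefinitely. The underlying pattern is a bounded polling loop; a generic sketch is shown below, where the waitFor name and the 100 ms poll interval are assumptions for illustration, not the actual Hadoop test utility:

```java
import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

// Generic "poll until true or time out" helper, similar in spirit to the
// waiting done by the test utilities touched in this patch. Illustrative only.
public class WaitForSketch {
  static void waitFor(BooleanSupplier condition, long timeoutMs)
      throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("Condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(100); // poll interval chosen arbitrarily for the sketch
    }
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    // Trivial usage: wait until at least 300 ms have elapsed.
    waitFor(() -> System.currentTimeMillis() - start >= 300, 5000);
    System.out.println("condition met");
  }
}
```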

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java

@@ -105,7 +105,7 @@ public class TestDataNodeVolumeFailure {
    * failure if the configuration parameter allows this.
    * failure if the configuration parameter allows this.
    */
    */
   @Test
   @Test
-  public void testVolumeFailure() throws IOException {
+  public void testVolumeFailure() throws Exception {
     FileSystem fs = cluster.getFileSystem();
     FileSystem fs = cluster.getFileSystem();
     dataDir = new File(cluster.getDataDirectory());
     dataDir = new File(cluster.getDataDirectory());
     System.out.println("Data dir: is " +  dataDir.getPath());
     System.out.println("Data dir: is " +  dataDir.getPath());

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java

@@ -137,7 +137,7 @@ public class TestDatanodeRestart {
   }
   }
 
 
   // test recovering unlinked tmp replicas
   // test recovering unlinked tmp replicas
-  @Test public void testRecoverReplicas() throws IOException {
+  @Test public void testRecoverReplicas() throws Exception {
     Configuration conf = new HdfsConfiguration();
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);

+ 2 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java

@@ -31,9 +31,7 @@ import java.io.File;
 import java.io.FilenameFilter;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStream;
-import java.io.PrintWriter;
 import java.io.RandomAccessFile;
 import java.io.RandomAccessFile;
-import java.io.StringWriter;
 import java.net.URI;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Arrays;
@@ -1238,10 +1236,8 @@ public class TestEditLog {
         }
         }
       } catch (IOException e) {
       } catch (IOException e) {
       } catch (Throwable t) {
       } catch (Throwable t) {
-        StringWriter sw = new StringWriter();
-        t.printStackTrace(new PrintWriter(sw));
-        fail("caught non-IOException throwable with message " +
-            t.getMessage() + "\nstack trace\n" + sw.toString());
+        fail("Caught non-IOException throwable " +
+             StringUtils.stringifyException(t));
       }
       }
     } finally {
     } finally {
       if ((elfos != null) && (elfos.isOpen()))
       if ((elfos != null) && (elfos.isOpen()))

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java

@@ -116,7 +116,7 @@ public class TestFSEditLogLoader {
    * automatically bumped up to the new minimum upon restart.
    * automatically bumped up to the new minimum upon restart.
    */
    */
   @Test
   @Test
-  public void testReplicationAdjusted() throws IOException {
+  public void testReplicationAdjusted() throws Exception {
     // start a cluster 
     // start a cluster 
     Configuration conf = new HdfsConfiguration();
     Configuration conf = new HdfsConfiguration();
     // Replicate and heartbeat fast to shave a few seconds off test
     // Replicate and heartbeat fast to shave a few seconds off test

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java

@@ -53,7 +53,7 @@ public class TestProcessCorruptBlocks {
    *      replicas (2) is equal to replication factor (2))
    *      replicas (2) is equal to replication factor (2))
    */
    */
   @Test
   @Test
-  public void testWhenDecreasingReplication() throws IOException {
+  public void testWhenDecreasingReplication() throws Exception {
     Configuration conf = new HdfsConfiguration();
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -108,7 +108,7 @@ public class TestProcessCorruptBlocks {
    * 
    * 
    */
    */
   @Test
   @Test
-  public void testByAddingAnExtraDataNode() throws IOException {
+  public void testByAddingAnExtraDataNode() throws Exception {
     Configuration conf = new HdfsConfiguration();
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -159,7 +159,7 @@ public class TestProcessCorruptBlocks {
    *      replicas (1) is equal to replication factor (1))
    *      replicas (1) is equal to replication factor (1))
    */
    */
   @Test
   @Test
-  public void testWithReplicationFactorAsOne() throws IOException {
+  public void testWithReplicationFactorAsOne() throws Exception {
     Configuration conf = new HdfsConfiguration();
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -208,7 +208,7 @@ public class TestProcessCorruptBlocks {
    *    Verify that all replicas are corrupt and 3 replicas are present.
    *    Verify that all replicas are corrupt and 3 replicas are present.
    */
    */
   @Test
   @Test
-  public void testWithAllCorruptReplicas() throws IOException {
+  public void testWithAllCorruptReplicas() throws Exception {
     Configuration conf = new HdfsConfiguration();
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

@@ -80,7 +80,7 @@ public class TestWebHDFS {
     }
     }
   }
   }
 
 
-  @Test
+  @Test(timeout=300000)
   public void testLargeFile() throws Exception {
   public void testLargeFile() throws Exception {
     largeFileTest(200L << 20); //200MB file length
     largeFileTest(200L << 20); //200MB file length
   }
   }
@@ -202,7 +202,7 @@ public class TestWebHDFS {
   }
   }
 
 
   /** Test client retry with namenode restarting. */
   /** Test client retry with namenode restarting. */
-  @Test
+  @Test(timeout=300000)
   public void testNamenodeRestart() throws Exception {
   public void testNamenodeRestart() throws Exception {
     ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
     final Configuration conf = WebHdfsTestUtil.createConf();
     final Configuration conf = WebHdfsTestUtil.createConf();

+ 6 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -138,6 +138,12 @@ Release 2.0.3-alpha - Unreleased
 
 
   BUG FIXES
   BUG FIXES
 
 
+    MAPREDUCE-4607. Race condition in ReduceTask completion can result in Task
+    being incorrectly failed. (Bikas Saha via tomwhite)
+
+    MAPREDUCE-4646. Fixed MR framework to send diagnostic information correctly
+    to clients in case of failed jobs also. (Jason Lowe via vinodkv)
+
 Release 2.0.2-alpha - 2012-09-07 
 Release 2.0.2-alpha - 2012-09-07 
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES

+ 10 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java

@@ -582,17 +582,23 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
       String jobFile =
       String jobFile =
           remoteJobConfFile == null ? "" : remoteJobConfFile.toString();
           remoteJobConfFile == null ? "" : remoteJobConfFile.toString();
 
 
+      StringBuilder diagsb = new StringBuilder();
+      for (String s : getDiagnostics()) {
+        diagsb.append(s).append("\n");
+      }
+
       if (getState() == JobState.NEW) {
       if (getState() == JobState.NEW) {
         return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
         return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
             appSubmitTime, startTime, finishTime, setupProgress, 0.0f, 0.0f,
             appSubmitTime, startTime, finishTime, setupProgress, 0.0f, 0.0f,
-            cleanupProgress, jobFile, amInfos, isUber);
+            cleanupProgress, jobFile, amInfos, isUber, diagsb.toString());
       }
       }
 
 
       computeProgress();
       computeProgress();
-      return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
-          appSubmitTime, startTime, finishTime, setupProgress,
+      JobReport report = MRBuilderUtils.newJobReport(jobId, jobName, username,
+          state, appSubmitTime, startTime, finishTime, setupProgress,
           this.mapProgress, this.reduceProgress,
           this.mapProgress, this.reduceProgress,
-          cleanupProgress, jobFile, amInfos, isUber);
+          cleanupProgress, jobFile, amInfos, isUber, diagsb.toString());
+      return report;
     } finally {
     } finally {
       readLock.unlock();
       readLock.unlock();
     }
     }
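The JobImpl change above concatenates all diagnostic strings into one newline-separated value before building the JobReport, which is how failed jobs now surface their diagnostics to clients. The aggregation is just a join; a minimal equivalent using String.join is sketched here (illustrative only, the patch itself uses a StringBuilder loop):

```java
import java.util.List;

// Minimal equivalent of the diagnostics aggregation added to JobImpl.getReport():
// every diagnostic line is folded into one newline-separated string.
public class DiagnosticsSketch {
  static String aggregate(List<String> diagnostics) {
    if (diagnostics == null || diagnostics.isEmpty()) {
      return "";
    }
    return String.join("\n", diagnostics) + "\n";
  }

  public static void main(String[] args) {
    System.out.println(aggregate(List.of(
        "Task failed task_1_0_m_000000",
        "Job failed as tasks failed.")));
  }
}
```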

+ 33 - 9
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java

@@ -71,6 +71,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
 import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
@@ -86,6 +87,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
@@ -120,6 +122,7 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
 import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
@@ -128,6 +131,8 @@ import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.RackResolver;
 import org.apache.hadoop.yarn.util.RackResolver;
 
 
+import com.google.common.base.Preconditions;
+
 /**
 /**
  * Implementation of TaskAttempt interface.
  * Implementation of TaskAttempt interface.
  */
  */
@@ -404,10 +409,10 @@ public abstract class TaskAttemptImpl implements
          TaskAttemptState.FAILED,
          TaskAttemptState.FAILED,
          TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE,
          TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE,
          new TooManyFetchFailureTransition())
          new TooManyFetchFailureTransition())
-     .addTransition(
-         TaskAttemptState.SUCCEEDED, TaskAttemptState.KILLED,
-         TaskAttemptEventType.TA_KILL,
-         new KilledAfterSuccessTransition())
+      .addTransition(TaskAttemptState.SUCCEEDED,
+          EnumSet.of(TaskAttemptState.SUCCEEDED, TaskAttemptState.KILLED),
+          TaskAttemptEventType.TA_KILL, 
+          new KilledAfterSuccessTransition())
      .addTransition(
      .addTransition(
          TaskAttemptState.SUCCEEDED, TaskAttemptState.SUCCEEDED,
          TaskAttemptState.SUCCEEDED, TaskAttemptState.SUCCEEDED,
          TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
          TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
@@ -1483,6 +1488,9 @@ public abstract class TaskAttemptImpl implements
     @SuppressWarnings("unchecked")
     @SuppressWarnings("unchecked")
     @Override
     @Override
     public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
     public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
+      // too many fetch failures can only happen for map tasks
+      Preconditions
+          .checkArgument(taskAttempt.getID().getTaskId().getTaskType() == TaskType.MAP);
       //add to diagnostic
       //add to diagnostic
       taskAttempt.addDiagnosticInfo("Too Many fetch failures.Failing the attempt");
       taskAttempt.addDiagnosticInfo("Too Many fetch failures.Failing the attempt");
       //set the finish time
       //set the finish time
@@ -1506,15 +1514,30 @@ public abstract class TaskAttemptImpl implements
   }
   }
   
   
   private static class KilledAfterSuccessTransition implements
   private static class KilledAfterSuccessTransition implements
-      SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+      MultipleArcTransition<TaskAttemptImpl, TaskAttemptEvent, TaskAttemptState> {
 
 
     @SuppressWarnings("unchecked")
     @SuppressWarnings("unchecked")
     @Override
     @Override
-    public void transition(TaskAttemptImpl taskAttempt, 
+    public TaskAttemptState transition(TaskAttemptImpl taskAttempt, 
         TaskAttemptEvent event) {
         TaskAttemptEvent event) {
-      TaskAttemptKillEvent msgEvent = (TaskAttemptKillEvent) event;
-      //add to diagnostic
-      taskAttempt.addDiagnosticInfo(msgEvent.getMessage());
+      if(taskAttempt.getID().getTaskId().getTaskType() == TaskType.REDUCE) {
+        // after a reduce task has succeeded, its outputs are safe in HDFS.
+        // Logically such a task should not be killed. We only come here when
+        // there is a race condition in the event queue, e.g. some logic sends
+        // a kill request to this attempt when the successful completion event
+        // for this task is already in the event queue, so the kill event will
+        // get executed immediately after the attempt is marked successful and
+        // result in this transition being exercised.
+        // Ignore this for reduce tasks.
+        LOG.info("Ignoring killed event for successful reduce task attempt " +
+                  taskAttempt.getID().toString());
+        return TaskAttemptState.SUCCEEDED;
+      }
+      if(event instanceof TaskAttemptKillEvent) {
+        TaskAttemptKillEvent msgEvent = (TaskAttemptKillEvent) event;
+        //add to diagnostic
+        taskAttempt.addDiagnosticInfo(msgEvent.getMessage());
+      }
 
 
       // not setting a finish time since it was set on success
       // not setting a finish time since it was set on success
       assert (taskAttempt.getFinishTime() != 0);
       assert (taskAttempt.getFinishTime() != 0);
@@ -1528,6 +1551,7 @@ public abstract class TaskAttemptImpl implements
           .getTaskId().getJobId(), tauce));
           .getTaskId().getJobId(), tauce));
       taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
       taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
           taskAttempt.attemptId, TaskEventType.T_ATTEMPT_KILLED));
           taskAttempt.attemptId, TaskEventType.T_ATTEMPT_KILLED));
+      return TaskAttemptState.KILLED;
     }
     }
   }
   }
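The KilledAfterSuccessTransition rewrite above turns the transition into a multiple-arc one: a TA_KILL that races with an already-processed success is ignored for reduce attempts, whose output is already committed to HDFS, but still moves a map attempt to KILLED because its output lives on the node being lost. A small sketch of that decision, using plain enums instead of the MR state-machine types (an illustrative assumption, not the real TaskAttemptImpl code):

```java
// Illustrative decision logic only; the real code lives in
// TaskAttemptImpl.KilledAfterSuccessTransition and uses the YARN state machine.
public class KillAfterSuccessSketch {
  enum TaskType { MAP, REDUCE }
  enum AttemptState { SUCCEEDED, KILLED }

  // A kill arriving after success is ignored for reduces, honored for maps.
  static AttemptState onKillAfterSuccess(TaskType type) {
    if (type == TaskType.REDUCE) {
      // Reduce output is already safely in HDFS; treat the late kill as a no-op.
      return AttemptState.SUCCEEDED;
    }
    // Map output lives on the (possibly bad) node, so the attempt must be
    // re-run elsewhere: report it as killed so the task can reschedule.
    return AttemptState.KILLED;
  }

  public static void main(String[] args) {
    System.out.println(onKillAfterSuccess(TaskType.REDUCE)); // SUCCEEDED
    System.out.println(onKillAfterSuccess(TaskType.MAP));    // KILLED
  }
}
```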
 
 

+ 37 - 33
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java

@@ -191,12 +191,12 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
             TaskEventType.T_ADD_SPEC_ATTEMPT))
             TaskEventType.T_ADD_SPEC_ATTEMPT))
 
 
     // Transitions from SUCCEEDED state
     // Transitions from SUCCEEDED state
-    .addTransition(TaskState.SUCCEEDED, //only possible for map tasks
+    .addTransition(TaskState.SUCCEEDED,
         EnumSet.of(TaskState.SCHEDULED, TaskState.SUCCEEDED, TaskState.FAILED),
         EnumSet.of(TaskState.SCHEDULED, TaskState.SUCCEEDED, TaskState.FAILED),
-        TaskEventType.T_ATTEMPT_FAILED, new MapRetroactiveFailureTransition())
-    .addTransition(TaskState.SUCCEEDED, //only possible for map tasks
+        TaskEventType.T_ATTEMPT_FAILED, new RetroactiveFailureTransition())
+    .addTransition(TaskState.SUCCEEDED,
         EnumSet.of(TaskState.SCHEDULED, TaskState.SUCCEEDED),
         EnumSet.of(TaskState.SCHEDULED, TaskState.SUCCEEDED),
-        TaskEventType.T_ATTEMPT_KILLED, new MapRetroactiveKilledTransition())
+        TaskEventType.T_ATTEMPT_KILLED, new RetroactiveKilledTransition())
     // Ignore-able transitions.
     // Ignore-able transitions.
     .addTransition(
     .addTransition(
         TaskState.SUCCEEDED, TaskState.SUCCEEDED,
         TaskState.SUCCEEDED, TaskState.SUCCEEDED,
@@ -897,7 +897,7 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
     }
     }
   }
   }
 
 
-  private static class MapRetroactiveFailureTransition
+  private static class RetroactiveFailureTransition
       extends AttemptFailedTransition {
       extends AttemptFailedTransition {
 
 
     @Override
     @Override
@@ -911,8 +911,8 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
           return TaskState.SUCCEEDED;
           return TaskState.SUCCEEDED;
         }
         }
       }
       }
-      
-      //verify that this occurs only for map task
+
+      // a successful REDUCE task should not be overridden
       //TODO: consider moving it to MapTaskImpl
       //TODO: consider moving it to MapTaskImpl
       if (!TaskType.MAP.equals(task.getType())) {
       if (!TaskType.MAP.equals(task.getType())) {
         LOG.error("Unexpected event for REDUCE task " + event.getType());
         LOG.error("Unexpected event for REDUCE task " + event.getType());
@@ -938,42 +938,46 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
     }
     }
   }
   }
 
 
-  private static class MapRetroactiveKilledTransition implements
+  private static class RetroactiveKilledTransition implements
     MultipleArcTransition<TaskImpl, TaskEvent, TaskState> {
     MultipleArcTransition<TaskImpl, TaskEvent, TaskState> {
 
 
     @Override
     @Override
     public TaskState transition(TaskImpl task, TaskEvent event) {
     public TaskState transition(TaskImpl task, TaskEvent event) {
-      // verify that this occurs only for map task
+      TaskAttemptId attemptId = null;
+      if (event instanceof TaskTAttemptEvent) {
+        TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
+        attemptId = castEvent.getTaskAttemptID(); 
+        if (task.getState() == TaskState.SUCCEEDED &&
+            !attemptId.equals(task.successfulAttempt)) {
+          // don't allow a different task attempt to override a previous
+          // succeeded state
+          return TaskState.SUCCEEDED;
+        }
+      }
+
+      // a successful REDUCE task should not be overridden
       // TODO: consider moving it to MapTaskImpl
       // TODO: consider moving it to MapTaskImpl
       if (!TaskType.MAP.equals(task.getType())) {
       if (!TaskType.MAP.equals(task.getType())) {
         LOG.error("Unexpected event for REDUCE task " + event.getType());
         LOG.error("Unexpected event for REDUCE task " + event.getType());
         task.internalError(event.getType());
         task.internalError(event.getType());
       }
       }
 
 
-      TaskTAttemptEvent attemptEvent = (TaskTAttemptEvent) event;
-      TaskAttemptId attemptId = attemptEvent.getTaskAttemptID();
-      if(task.successfulAttempt == attemptId) {
-        // successful attempt is now killed. reschedule
-        // tell the job about the rescheduling
-        unSucceed(task);
-        task.handleTaskAttemptCompletion(
-            attemptId, 
-            TaskAttemptCompletionEventStatus.KILLED);
-        task.eventHandler.handle(new JobMapTaskRescheduledEvent(task.taskId));
-        // typically we are here because this map task was run on a bad node and 
-        // we want to reschedule it on a different node.
-        // Depending on whether there are previous failed attempts or not this 
-        // can SCHEDULE or RESCHEDULE the container allocate request. If this
-        // SCHEDULE's then the dataLocal hosts of this taskAttempt will be used
-        // from the map splitInfo. So the bad node might be sent as a location 
-        // to the RM. But the RM would ignore that just like it would ignore 
-        // currently pending container requests affinitized to bad nodes.
-        task.addAndScheduleAttempt();
-        return TaskState.SCHEDULED;
-      } else {
-        // nothing to do
-        return TaskState.SUCCEEDED;
-      }
+      // successful attempt is now killed. reschedule
+      // tell the job about the rescheduling
+      unSucceed(task);
+      task.handleTaskAttemptCompletion(attemptId,
+          TaskAttemptCompletionEventStatus.KILLED);
+      task.eventHandler.handle(new JobMapTaskRescheduledEvent(task.taskId));
+      // typically we are here because this map task was run on a bad node and
+      // we want to reschedule it on a different node.
+      // Depending on whether there are previous failed attempts or not this
+      // can SCHEDULE or RESCHEDULE the container allocate request. If this
+      // SCHEDULE's then the dataLocal hosts of this taskAttempt will be used
+      // from the map splitInfo. So the bad node might be sent as a location
+      // to the RM. But the RM would ignore that just like it would ignore
+      // currently pending container requests affinitized to bad nodes.
+      task.addAndScheduleAttempt();
+      return TaskState.SCHEDULED;
     }
     }
   }
   }
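At the task level, the RetroactiveKilledTransition above only un-succeeds and reschedules when the killed attempt is the one that made the task SUCCEEDED; a kill report for any other attempt leaves the task in SUCCEEDED. A minimal sketch of that guard (hypothetical method and names, shown only to make the branching explicit):

```java
import java.util.Objects;

// Illustrative only: mirrors the guard added in TaskImpl.RetroactiveKilledTransition.
public class RetroactiveKillSketch {
  enum TaskState { SUCCEEDED, SCHEDULED }

  static TaskState onAttemptKilled(String successfulAttemptId, String killedAttemptId) {
    if (!Objects.equals(successfulAttemptId, killedAttemptId)) {
      // A different attempt was killed; the successful one is untouched.
      return TaskState.SUCCEEDED;
    }
    // The attempt that produced the task's output was killed (e.g. its node
    // went bad), so the task has to be scheduled again on another node.
    return TaskState.SCHEDULED;
  }

  public static void main(String[] args) {
    System.out.println(onAttemptKilled("attempt_1", "attempt_2")); // SUCCEEDED
    System.out.println(onAttemptKilled("attempt_1", "attempt_1")); // SCHEDULED
  }
}
```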
 
 

+ 28 - 10
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java

@@ -180,7 +180,7 @@ public class TestMRApp {
   @Test
   @Test
   public void testUpdatedNodes() throws Exception {
   public void testUpdatedNodes() throws Exception {
     int runCount = 0;
     int runCount = 0;
-    MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(),
+    MRApp app = new MRAppWithHistory(2, 2, false, this.getClass().getName(),
         true, ++runCount);
         true, ++runCount);
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     // after half of the map completion, reduce will start
     // after half of the map completion, reduce will start
@@ -189,7 +189,7 @@ public class TestMRApp {
     conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
     conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
     Job job = app.submit(conf);
     Job job = app.submit(conf);
     app.waitForState(job, JobState.RUNNING);
     app.waitForState(job, JobState.RUNNING);
-    Assert.assertEquals("Num tasks not correct", 3, job.getTasks().size());
+    Assert.assertEquals("Num tasks not correct", 4, job.getTasks().size());
     Iterator<Task> it = job.getTasks().values().iterator();
     Iterator<Task> it = job.getTasks().values().iterator();
     Task mapTask1 = it.next();
     Task mapTask1 = it.next();
     Task mapTask2 = it.next();
     Task mapTask2 = it.next();
@@ -272,18 +272,19 @@ public class TestMRApp {
 
 
     // rerun
     // rerun
     // in rerun the 1st map will be recovered from previous run
     // in rerun the 1st map will be recovered from previous run
-    app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false,
+    app = new MRAppWithHistory(2, 2, false, this.getClass().getName(), false,
         ++runCount);
         ++runCount);
     conf = new Configuration();
     conf = new Configuration();
     conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
     conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
     conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
     conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
     job = app.submit(conf);
     job = app.submit(conf);
     app.waitForState(job, JobState.RUNNING);
     app.waitForState(job, JobState.RUNNING);
-    Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
+    Assert.assertEquals("No of tasks not correct", 4, job.getTasks().size());
     it = job.getTasks().values().iterator();
     it = job.getTasks().values().iterator();
     mapTask1 = it.next();
     mapTask1 = it.next();
     mapTask2 = it.next();
     mapTask2 = it.next();
-    Task reduceTask = it.next();
+    Task reduceTask1 = it.next();
+    Task reduceTask2 = it.next();
 
 
    // map 1 will be recovered, no need to send done
    app.waitForState(mapTask1, TaskState.SUCCEEDED);
@@ -306,19 +307,36 @@ public class TestMRApp {
     Assert.assertEquals("Expecting 1 more completion events for success", 3,
     Assert.assertEquals("Expecting 1 more completion events for success", 3,
         events.length);
         events.length);
 
 
-    app.waitForState(reduceTask, TaskState.RUNNING);
-    TaskAttempt task3Attempt = reduceTask.getAttempts().values().iterator()
+    app.waitForState(reduceTask1, TaskState.RUNNING);
+    app.waitForState(reduceTask2, TaskState.RUNNING);
+
+    TaskAttempt task3Attempt = reduceTask1.getAttempts().values().iterator()
        .next();
    app.getContext()
        .getEventHandler()
        .handle(
            new TaskAttemptEvent(task3Attempt.getID(),
                TaskAttemptEventType.TA_DONE));
-    app.waitForState(reduceTask, TaskState.SUCCEEDED);
+    app.waitForState(reduceTask1, TaskState.SUCCEEDED);
+    app.getContext()
+    .getEventHandler()
+    .handle(
+        new TaskAttemptEvent(task3Attempt.getID(),
+            TaskAttemptEventType.TA_KILL));
+    app.waitForState(reduceTask1, TaskState.SUCCEEDED);
+    
+    TaskAttempt task4Attempt = reduceTask2.getAttempts().values().iterator()
+        .next();
+    app.getContext()
+        .getEventHandler()
+        .handle(
+            new TaskAttemptEvent(task4Attempt.getID(),
+                TaskAttemptEventType.TA_DONE));
+    app.waitForState(reduceTask2, TaskState.SUCCEEDED);    
 
 
    events = job.getTaskAttemptCompletionEvents(0, 100);
-    Assert.assertEquals("Expecting 1 more completion events for success", 4,
-        events.length);
+    Assert.assertEquals("Expecting 2 more completion events for reduce success",
+        5, events.length);
 
 
    // job succeeds
    app.waitForState(job, JobState.SUCCEEDED);

+ 7 - 7
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java

@@ -138,7 +138,7 @@ public class TestRMContainerAllocator {
    Job mockJob = mock(Job.class);
    when(mockJob.getReport()).thenReturn(
        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, 
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
        appAttemptId, mockJob);

@@ -215,7 +215,7 @@ public class TestRMContainerAllocator {
    Job mockJob = mock(Job.class);
    when(mockJob.getReport()).thenReturn(
        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
        appAttemptId, mockJob);

@@ -281,7 +281,7 @@ public class TestRMContainerAllocator {
    Job mockJob = mock(Job.class);
    when(mockJob.getReport()).thenReturn(
        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
        appAttemptId, mockJob);

@@ -723,7 +723,7 @@ public class TestRMContainerAllocator {
    Job mockJob = mock(Job.class);
    when(mockJob.getReport()).thenReturn(
        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
        appAttemptId, mockJob);

@@ -827,7 +827,7 @@ public class TestRMContainerAllocator {
    Job mockJob = mock(Job.class);
    when(mockJob.getReport()).thenReturn(
        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
    MyContainerAllocator allocator =
        new MyContainerAllocator(rm, conf, appAttemptId, mockJob);

@@ -993,7 +993,7 @@ public class TestRMContainerAllocator {
    Job mockJob = mock(Job.class);
    when(mockJob.getReport()).thenReturn(
        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
        appAttemptId, mockJob);

@@ -1445,7 +1445,7 @@ public class TestRMContainerAllocator {
    Job job = mock(Job.class);
    when(job.getReport()).thenReturn(
        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false));
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
    doReturn(10).when(job).getTotalMaps();
    doReturn(10).when(job).getTotalReduces();
    doReturn(0).when(job).getCompletedMaps();

+ 40 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java

@@ -45,11 +45,14 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.InitTransition;
 import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.JobNoTasksCompletedTransition;
 import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.SystemClock;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.util.Records;
@@ -172,6 +175,8 @@ public class TestJobImpl {
    t.testCheckJobCompleteSuccess();
    t.testCheckJobCompleteSuccessFailed();
    t.testCheckAccess();
+    t.testReportDiagnostics();
+    t.testUberDecision();
  }

  @Test
@@ -241,6 +246,41 @@ public class TestJobImpl {
    Assert.assertTrue(job5.checkAccess(ugi1, null));
    Assert.assertTrue(job5.checkAccess(ugi2, null));
  }
+
+  @Test
+  public void testReportDiagnostics() throws Exception {
+    JobID jobID = JobID.forName("job_1234567890000_0001");
+    JobId jobId = TypeConverter.toYarn(jobID);
+    final String diagMsg = "some diagnostic message";
+    final JobDiagnosticsUpdateEvent diagUpdateEvent =
+        new JobDiagnosticsUpdateEvent(jobId, diagMsg);
+    MRAppMetrics mrAppMetrics = MRAppMetrics.create();
+    JobImpl job = new JobImpl(jobId, Records
+        .newRecord(ApplicationAttemptId.class), new Configuration(),
+        mock(EventHandler.class),
+        null, mock(JobTokenSecretManager.class), null,
+        new SystemClock(), null,
+        mrAppMetrics, mock(OutputCommitter.class),
+        true, null, 0, null, null);
+    job.handle(diagUpdateEvent);
+    String diagnostics = job.getReport().getDiagnostics();
+    Assert.assertNotNull(diagnostics);
+    Assert.assertTrue(diagnostics.contains(diagMsg));
+
+    job = new JobImpl(jobId, Records
+        .newRecord(ApplicationAttemptId.class), new Configuration(),
+        mock(EventHandler.class),
+        null, mock(JobTokenSecretManager.class), null,
+        new SystemClock(), null,
+        mrAppMetrics, mock(OutputCommitter.class),
+        true, null, 0, null, null);
+    job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
+    job.handle(diagUpdateEvent);
+    diagnostics = job.getReport().getDiagnostics();
+    Assert.assertNotNull(diagnostics);
+    Assert.assertTrue(diagnostics.contains(diagMsg));
+  }
+
  @Test
  public void testUberDecision() throws Exception {


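testReportDiagnostics above checks that a JobDiagnosticsUpdateEvent is reflected in getReport().getDiagnostics() whether the job is running or already being killed. A rough sketch of the accumulation behaviour the test expects, with invented names (this is not the JobImpl code):

    import java.util.ArrayList;
    import java.util.List;

    // Invented names; only the observable behaviour matches the test above.
    class DiagnosticsAccumulatorSketch {
      private final List<String> diagnostics = new ArrayList<String>();

      // Each diagnostics-update event appends its message, regardless of job state.
      void onDiagnosticsUpdate(String msg) {
        diagnostics.add(msg);
      }

      // What the report is expected to expose: every message seen so far.
      String reportDiagnostics() {
        StringBuilder sb = new StringBuilder();
        for (String d : diagnostics) {
          sb.append(d).append('\n');
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        DiagnosticsAccumulatorSketch job = new DiagnosticsAccumulatorSketch();
        job.onDiagnosticsUpdate("some diagnostic message");
        System.out.println(job.reportDiagnostics().contains("some diagnostic message"));  // true
      }
    }
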
+ 54 - 15
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java

@@ -84,7 +84,6 @@ public class TestTaskImpl {
  private ApplicationId appId;
  private TaskSplitMetaInfo taskSplitMetaInfo;  
  private String[] dataLocations = new String[0]; 
-  private final TaskType taskType = TaskType.MAP;
  private AppContext appContext;
  
  private int startCount = 0;
@@ -97,6 +96,7 @@ public class TestTaskImpl {
  private class MockTaskImpl extends TaskImpl {
        
    private int taskAttemptCounter = 0;
+    TaskType taskType;
 
 
    public MockTaskImpl(JobId jobId, int partition,
        EventHandler eventHandler, Path remoteJobConfFile, JobConf conf,
@@ -104,11 +104,12 @@ public class TestTaskImpl {
        Token<JobTokenIdentifier> jobToken,
        Credentials credentials, Clock clock,
        Map<TaskId, TaskInfo> completedTasksFromPreviousRun, int startCount,
-        MRAppMetrics metrics, AppContext appContext) {
+        MRAppMetrics metrics, AppContext appContext, TaskType taskType) {
      super(jobId, taskType , partition, eventHandler,
          remoteJobConfFile, conf, taskAttemptListener, committer, 
          jobToken, credentials, clock,
          completedTasksFromPreviousRun, startCount, metrics, appContext);
+      this.taskType = taskType;
    }

    @Override
@@ -120,7 +121,7 @@ public class TestTaskImpl {
    protected TaskAttemptImpl createAttempt() {
      MockTaskAttemptImpl attempt = new MockTaskAttemptImpl(getID(), ++taskAttemptCounter, 
          eventHandler, taskAttemptListener, remoteJobConfFile, partition,
-          conf, committer, jobToken, credentials, clock, appContext);
+          conf, committer, jobToken, credentials, clock, appContext, taskType);
      taskAttempts.add(attempt);
      return attempt;
    }
@@ -142,18 +143,20 @@ public class TestTaskImpl {
    private float progress = 0;
    private TaskAttemptState state = TaskAttemptState.NEW;
    private TaskAttemptId attemptId;
+    private TaskType taskType;
 
 
    public MockTaskAttemptImpl(TaskId taskId, int id, EventHandler eventHandler,
        TaskAttemptListener taskAttemptListener, Path jobFile, int partition,
        JobConf conf, OutputCommitter committer,
        Token<JobTokenIdentifier> jobToken,
        Credentials credentials, Clock clock,
-        AppContext appContext) {
+        AppContext appContext, TaskType taskType) {
      super(taskId, id, eventHandler, taskAttemptListener, jobFile, partition, conf,
          dataLocations, committer, jobToken, credentials, clock, appContext);
      attemptId = Records.newRecord(TaskAttemptId.class);
      attemptId.setId(id);
      attemptId.setTaskId(taskId);
+      this.taskType = taskType;
    }

    public TaskAttemptId getAttemptId() {
@@ -162,7 +165,7 @@ public class TestTaskImpl {
     
     
    @Override
    protected Task createRemoteTask() {
-      return new MockTask();
+      return new MockTask(taskType);
    }    
    
    public float getProgress() {
@@ -185,6 +188,11 @@ public class TestTaskImpl {
   
   
  private class MockTask extends Task {

+    private TaskType taskType;
+    MockTask(TaskType taskType) {
+      this.taskType = taskType;
+    }
+    
    @Override
    public void run(JobConf job, TaskUmbilicalProtocol umbilical)
        throws IOException, ClassNotFoundException, InterruptedException {
@@ -193,7 +201,7 @@ public class TestTaskImpl {
 
 
    @Override
    public boolean isMapTask() {
-      return true;
+      return (taskType == TaskType.MAP);
    }    
    
  }
@@ -227,14 +235,15 @@ public class TestTaskImpl {
    taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
    when(taskSplitMetaInfo.getLocations()).thenReturn(dataLocations); 
    
-    taskAttempts = new ArrayList<MockTaskAttemptImpl>();
-    
-    mockTask = new MockTaskImpl(jobId, partition, dispatcher.getEventHandler(),
+    taskAttempts = new ArrayList<MockTaskAttemptImpl>();    
+  }
+  
+  private MockTaskImpl createMockTask(TaskType taskType) {
+    return new MockTaskImpl(jobId, partition, dispatcher.getEventHandler(),
        remoteJobConfFile, conf, taskAttemptListener, committer, jobToken,
        credentials, clock,
        completedTasksFromPreviousRun, startCount,
-        metrics, appContext);        
-    
+        metrics, appContext, taskType);
  }

  @After 
@@ -342,6 +351,7 @@ public class TestTaskImpl {
  @Test
  public void testInit() {
    LOG.info("--- START: testInit ---");
+    mockTask = createMockTask(TaskType.MAP);        
    assertTaskNewState();
    assert(taskAttempts.size() == 0);
  }
@@ -352,6 +362,7 @@ public class TestTaskImpl {
   */
  public void testScheduleTask() {
    LOG.info("--- START: testScheduleTask ---");
+    mockTask = createMockTask(TaskType.MAP);        
    TaskId taskId = getNewTaskID();
    scheduleTaskAttempt(taskId);
  }
@@ -362,6 +373,7 @@ public class TestTaskImpl {
   */
  public void testKillScheduledTask() {
    LOG.info("--- START: testKillScheduledTask ---");
+    mockTask = createMockTask(TaskType.MAP);        
    TaskId taskId = getNewTaskID();
    scheduleTaskAttempt(taskId);
    killTask(taskId);
@@ -374,6 +386,7 @@ public class TestTaskImpl {
   */
  public void testKillScheduledTaskAttempt() {
    LOG.info("--- START: testKillScheduledTaskAttempt ---");
+    mockTask = createMockTask(TaskType.MAP);        
    TaskId taskId = getNewTaskID();
    scheduleTaskAttempt(taskId);
    killScheduledTaskAttempt(getLastAttempt().getAttemptId());
@@ -386,6 +399,7 @@ public class TestTaskImpl {
   */
  public void testLaunchTaskAttempt() {
    LOG.info("--- START: testLaunchTaskAttempt ---");
+    mockTask = createMockTask(TaskType.MAP);        
    TaskId taskId = getNewTaskID();
    scheduleTaskAttempt(taskId);
    launchTaskAttempt(getLastAttempt().getAttemptId());
@@ -398,6 +412,7 @@ public class TestTaskImpl {
   */
  public void testKillRunningTaskAttempt() {
    LOG.info("--- START: testKillRunningTaskAttempt ---");
+    mockTask = createMockTask(TaskType.MAP);        
    TaskId taskId = getNewTaskID();
    scheduleTaskAttempt(taskId);
    launchTaskAttempt(getLastAttempt().getAttemptId());
@@ -407,6 +422,7 @@ public class TestTaskImpl {
  @Test 
  public void testTaskProgress() {
    LOG.info("--- START: testTaskProgress ---");
+    mockTask = createMockTask(TaskType.MAP);        
         
         
    // launch task
    TaskId taskId = getNewTaskID();
@@ -444,6 +460,7 @@ public class TestTaskImpl {
   
   
  @Test
  public void testFailureDuringTaskAttemptCommit() {
+    mockTask = createMockTask(TaskType.MAP);        
    TaskId taskId = getNewTaskID();
    scheduleTaskAttempt(taskId);
    launchTaskAttempt(getLastAttempt().getAttemptId());
@@ -469,8 +486,7 @@ public class TestTaskImpl {
    assertTaskSucceededState();
  }
  
-  @Test
-  public void testSpeculativeTaskAttemptSucceedsEvenIfFirstFails() {
+  private void runSpeculativeTaskAttemptSucceedsEvenIfFirstFails(TaskEventType failEvent) {
    TaskId taskId = getNewTaskID();
    scheduleTaskAttempt(taskId);
    launchTaskAttempt(getLastAttempt().getAttemptId());
@@ -489,11 +505,34 @@ public class TestTaskImpl {
     
     
    // Now fail the first task attempt, after the second has succeeded
    mockTask.handle(new TaskTAttemptEvent(taskAttempts.get(0).getAttemptId(), 
-        TaskEventType.T_ATTEMPT_FAILED));
+        failEvent));
     
     
    // The task should still be in the succeeded state
    assertTaskSucceededState();
-    
+  }
+  
+  @Test
+  public void testMapSpeculativeTaskAttemptSucceedsEvenIfFirstFails() {
+    mockTask = createMockTask(TaskType.MAP);        
+    runSpeculativeTaskAttemptSucceedsEvenIfFirstFails(TaskEventType.T_ATTEMPT_FAILED);
+  }
+
+  @Test
+  public void testReduceSpeculativeTaskAttemptSucceedsEvenIfFirstFails() {
+    mockTask = createMockTask(TaskType.REDUCE);        
+    runSpeculativeTaskAttemptSucceedsEvenIfFirstFails(TaskEventType.T_ATTEMPT_FAILED);
+  }
+  
+  @Test
+  public void testMapSpeculativeTaskAttemptSucceedsEvenIfFirstIsKilled() {
+    mockTask = createMockTask(TaskType.MAP);        
+    runSpeculativeTaskAttemptSucceedsEvenIfFirstFails(TaskEventType.T_ATTEMPT_KILLED);
+  }
+
+  @Test
+  public void testReduceSpeculativeTaskAttemptSucceedsEvenIfFirstIsKilled() {
+    mockTask = createMockTask(TaskType.REDUCE);        
+    runSpeculativeTaskAttemptSucceedsEvenIfFirstFails(TaskEventType.T_ATTEMPT_KILLED);
  }

 }

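The refactoring above replaces the fixed MAP-only mockTask with a createMockTask(TaskType) factory and folds the speculative-attempt scenario into one private helper parameterized by the terminal event, so the same body covers MAP/REDUCE and T_ATTEMPT_FAILED/T_ATTEMPT_KILLED. A sketch of that test-structuring pattern with invented names (JUnit 4 assumed on the classpath; only the structure mirrors the diff):

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class SpeculativeScenarioSketch {

      enum Kind { MAP, REDUCE }
      enum FirstAttemptEnd { FAILED, KILLED }

      // One body, many @Test entry points: in the real test this schedules two
      // attempts, succeeds the second, then delivers the terminal event to the first.
      private String run(Kind kind, FirstAttemptEnd end) {
        return kind + "/" + end + " -> task still SUCCEEDED";
      }

      @Test
      public void mapSurvivesFailedFirstAttempt() {
        assertEquals("MAP/FAILED -> task still SUCCEEDED",
            run(Kind.MAP, FirstAttemptEnd.FAILED));
      }

      @Test
      public void reduceSurvivesKilledFirstAttempt() {
        assertEquals("REDUCE/KILLED -> task still SUCCEEDED",
            run(Kind.REDUCE, FirstAttemptEnd.KILLED));
      }
    }

The alternative would have been a JUnit Parameterized runner; a plain helper keeps each case visible as its own named test, which is the choice the diff makes.
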
+ 2 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java

@@ -67,7 +67,7 @@ public class MRBuilderUtils {
      String userName, JobState state, long submitTime, long startTime, long finishTime,
      float setupProgress, float mapProgress, float reduceProgress,
      float cleanupProgress, String jobFile, List<AMInfo> amInfos,
-      boolean isUber) {
+      boolean isUber, String diagnostics) {
    JobReport report = Records.newRecord(JobReport.class);
    report.setJobId(jobId);
    report.setJobName(jobName);
@@ -83,6 +83,7 @@ public class MRBuilderUtils {
    report.setJobFile(jobFile);
    report.setAMInfos(amInfos);
    report.setIsUber(isUber);
+    report.setDiagnostics(diagnostics);
    return report;
  }


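With the new trailing parameter, every newJobReport caller now supplies a diagnostics string (the mocks in the tests above just pass ""). A sketch of a call site against the changed signature; it assumes the MR client and YARN jars are on the classpath, that JobId/JobState/Records resolve to the packages imported in the tests above, and that JobReport sits in the same records package (an assumption, since its import is not shown in this diff):

    import org.apache.hadoop.mapreduce.v2.api.records.JobId;
    import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
    import org.apache.hadoop.mapreduce.v2.api.records.JobState;
    import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
    import org.apache.hadoop.yarn.util.Records;

    class NewJobReportCallSketch {
      static JobReport example() {
        JobId jobId = Records.newRecord(JobId.class);
        // The 15th argument is the new diagnostics string; empty when there is
        // nothing to report.
        return MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
            0L, 0L, 0L, 0.0f, 0.0f, 0.0f, 0.0f, "jobfile", null, false,
            "optional diagnostic text");
      }
    }
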
+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java

@@ -520,5 +520,10 @@ public class ConfigUtil {
        MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_CLIENT   
    });
  }
+
+  public static void main(String[] args) {
+    loadResources();
+    Configuration.dumpDeprecatedKeys();
+  }
}
 

+ 4 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java

@@ -219,7 +219,8 @@ public class TestClientServiceDelegate {
    GetJobReportResponse jobReportResponse1 = mock(GetJobReportResponse.class);
    when(jobReportResponse1.getJobReport()).thenReturn(
        MRBuilderUtils.newJobReport(jobId, "jobName-firstGen", "user",
-            JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null, false));
+            JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null,
+            false, ""));
 
 
    // First AM returns a report with jobName firstGen and simulates AM shutdown
    // on second invocation.
@@ -231,7 +232,8 @@ public class TestClientServiceDelegate {
    GetJobReportResponse jobReportResponse2 = mock(GetJobReportResponse.class);
    when(jobReportResponse2.getJobReport()).thenReturn(
        MRBuilderUtils.newJobReport(jobId, "jobName-secondGen", "user",
-            JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null, false));
+            JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null,
+            false, ""));
 
 
    // Second AM generation returns a report with jobName secondGen
    MRClientProtocol secondGenAMProxy = mock(MRClientProtocol.class);

+ 5 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java

@@ -23,6 +23,7 @@ import static org.mockito.Mockito.when;
 
 
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.util.concurrent.TimeoutException;
 
 
 import junit.framework.TestCase;

@@ -95,7 +96,7 @@ public class TestFileInputFormat extends TestCase {
  }

  private void createInputs(FileSystem fs, Path inDir, String fileName)
-  throws IOException {
+      throws IOException, TimeoutException, InterruptedException {
    // create a multi-block file on hdfs
    Path path = new Path(inDir, fileName);
    final short replication = 2;
@@ -157,7 +158,7 @@ public class TestFileInputFormat extends TestCase {
    }
  }

-  public void testMultiLevelInput() throws IOException {
+  public void testMultiLevelInput() throws Exception {
    JobConf job = new JobConf(conf);

    job.setBoolean("dfs.replication.considerLoad", false);
@@ -291,7 +292,8 @@ public class TestFileInputFormat extends TestCase {
  }

  static void writeFile(Configuration conf, Path name,
-      short replication, int numBlocks) throws IOException {
+      short replication, int numBlocks)
+      throws IOException, TimeoutException, InterruptedException {
    FileSystem fileSys = FileSystem.get(conf);

    FSDataOutputStream stm = fileSys.create(name, true,

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultipleLevelCaching.java

@@ -71,13 +71,13 @@ public class TestMultipleLevelCaching extends TestCase {
    return rack.toString();
  }

-  public void testMultiLevelCaching() throws IOException {
+  public void testMultiLevelCaching() throws Exception {
    for (int i = 1 ; i <= MAX_LEVEL; ++i) {
      testCachingAtLevel(i);
    }
  }

-  private void testCachingAtLevel(int level) throws IOException {
+  private void testCachingAtLevel(int level) throws Exception {
    String namenode = null;
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;

+ 21 - 12
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java

@@ -31,6 +31,7 @@ import java.util.Enumeration;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
+import java.util.concurrent.TimeoutException;
 
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -449,11 +450,14 @@ public class UtilsForTests {
  static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys, 
                          String mapSignalFile, 
                          String reduceSignalFile, int replication) 
-  throws IOException {
-    writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile), 
-              (short)replication);
-    writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile), 
-              (short)replication);
+      throws IOException, TimeoutException {
+    try {
+      writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile), 
+                (short)replication);
+      writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile), (short)replication);
+    } catch (InterruptedException ie) {
+      // Ignore
+    }
  }
  
  /**
@@ -462,12 +466,16 @@ public class UtilsForTests {
  static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys, 
                          boolean isMap, String mapSignalFile, 
                          String reduceSignalFile)
-  throws IOException {
-    //  signal the maps to complete
-    writeFile(dfs.getNameNode(), fileSys.getConf(),
-              isMap 
-              ? new Path(mapSignalFile)
-              : new Path(reduceSignalFile), (short)1);
+      throws IOException, TimeoutException {
+    try {
+      //  signal the maps to complete
+      writeFile(dfs.getNameNode(), fileSys.getConf(),
+                isMap 
+                ? new Path(mapSignalFile)
+                : new Path(reduceSignalFile), (short)1);
+    } catch (InterruptedException ie) {
+      // Ignore
+    }
  }
  
  static String getSignalFile(Path dir) {
@@ -483,7 +491,8 @@ public class UtilsForTests {
  }
  
  static void writeFile(NameNode namenode, Configuration conf, Path name, 
-      short replication) throws IOException {
+                        short replication)
+      throws IOException, TimeoutException, InterruptedException {
    FileSystem fileSys = FileSystem.get(conf);
    SequenceFile.Writer writer = 
      SequenceFile.createWriter(fileSys, conf, name, 

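Both signalTasks overloads above now swallow the InterruptedException that writeFile can throw. An alternative worth noting is to restore the thread's interrupt status instead of dropping it; a small sketch of that choice (writeToHdfs is a hypothetical stand-in for the writeFile call, not a Hadoop API):

    import java.io.IOException;

    final class InterruptPreservingSketch {
      interface HdfsWrite {
        void run() throws IOException, InterruptedException;
      }

      // Same shape as signalTasks, but convert InterruptedException into a
      // restored interrupt flag rather than ignoring it outright.
      static void signal(HdfsWrite writeToHdfs) throws IOException {
        try {
          writeToHdfs.run();
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();  // preserve interrupt status
        }
      }
    }
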
+ 10 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java

@@ -23,6 +23,7 @@ import java.net.URI;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.zip.GZIPOutputStream;
+import java.util.concurrent.TimeoutException;
 
 
 import junit.framework.TestCase;

@@ -278,7 +279,7 @@ public class TestCombineFileInputFormat extends TestCase {
    assertFalse(rr.nextKeyValue());
  }

-  public void testSplitPlacement() throws IOException {
+  public void testSplitPlacement() throws Exception {
    MiniDFSCluster dfs = null;
    FileSystem fileSys = null;
    try {
@@ -678,7 +679,8 @@ public class TestCombineFileInputFormat extends TestCase {
  }

  static void writeFile(Configuration conf, Path name,
-      short replication, int numBlocks) throws IOException {
+                        short replication, int numBlocks)
+      throws IOException, TimeoutException, InterruptedException {
    FileSystem fileSys = FileSystem.get(conf);

    FSDataOutputStream stm = fileSys.create(name, true,
@@ -689,7 +691,8 @@ public class TestCombineFileInputFormat extends TestCase {
 
 
  // Creates the gzip file and return the FileStatus
  static FileStatus writeGzipFile(Configuration conf, Path name,
-      short replication, int numBlocks) throws IOException {
+      short replication, int numBlocks)
+      throws IOException, TimeoutException, InterruptedException {
    FileSystem fileSys = FileSystem.get(conf);

    GZIPOutputStream out = new GZIPOutputStream(fileSys.create(name, true, conf
@@ -699,7 +702,8 @@ public class TestCombineFileInputFormat extends TestCase {
  }

  private static void writeDataAndSetReplication(FileSystem fileSys, Path name,
-      OutputStream out, short replication, int numBlocks) throws IOException {
+        OutputStream out, short replication, int numBlocks)
+      throws IOException, TimeoutException, InterruptedException {
    for (int i = 0; i < numBlocks; i++) {
      out.write(databuf);
    }
@@ -707,7 +711,7 @@ public class TestCombineFileInputFormat extends TestCase {
    DFSTestUtil.waitReplication(fileSys, name, replication);
  }
  
-  public void testSplitPlacementForCompressedFiles() throws IOException {
+  public void testSplitPlacementForCompressedFiles() throws Exception {
    MiniDFSCluster dfs = null;
    FileSystem fileSys = null;
    try {
@@ -1058,7 +1062,7 @@ public class TestCombineFileInputFormat extends TestCase {
  /**
   * Test that CFIF can handle missing blocks.
   */
-  public void testMissingBlocks() throws IOException {
+  public void testMissingBlocks() throws Exception {
    String namenode = null;
    MiniDFSCluster dfs = null;
    FileSystem fileSys = null;

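The writeFile/writeGzipFile helpers above pick up TimeoutException and InterruptedException because the write path ends in DFSTestUtil.waitReplication, which declares them. A sketch of that helper shape, assuming the HDFS test utilities (a MiniDFSCluster-backed FileSystem and DFSTestUtil) are on the classpath; names other than the FileSystem and DFSTestUtil calls are invented:

    import java.io.IOException;
    import java.util.concurrent.TimeoutException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;

    class WriteAndWaitSketch {
      // Write a few blocks, then block until the requested replication is
      // reached; waitReplication is what forces the extra throws clauses.
      static void writeFile(Configuration conf, Path name, short replication,
          byte[] block, int numBlocks)
          throws IOException, TimeoutException, InterruptedException {
        FileSystem fs = FileSystem.get(conf);
        FSDataOutputStream out = fs.create(name, replication);
        try {
          for (int i = 0; i < numBlocks; i++) {
            out.write(block);
          }
        } finally {
          out.close();
        }
        DFSTestUtil.waitReplication(fs, name, replication);
      }
    }
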
+ 12 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml

@@ -172,6 +172,18 @@
          <effort>Max</effort>
       </configuration>
     </plugin>
+     <plugin>
+       <groupId>org.apache.maven.plugins</groupId>
+       <artifactId>maven-surefire-plugin</artifactId>
+       <configuration>
+         <properties>
+           <property>
+             <name>listener</name>
+             <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+           </property>
+         </properties>
+       </configuration>
+     </plugin>
    </plugins>
  </build>
 

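Both surefire configurations register org.apache.hadoop.test.TimedOutTestsListener via the listener property, presumably so that a hung or failing test produces a thread dump in the build output. The listener class itself is not part of the hunks shown here; a minimal sketch of a JUnit 4 RunListener in the same spirit (not the real TimedOutTestsListener):

    import java.util.Map;

    import org.junit.runner.notification.Failure;
    import org.junit.runner.notification.RunListener;

    // Sketch only: dump all thread stacks when a test fails, for example
    // because it hit a timeout.
    public class ThreadDumpOnFailureListener extends RunListener {
      @Override
      public void testFailure(Failure failure) {
        System.err.println("Test failed: " + failure.getTestHeader());
        for (Map.Entry<Thread, StackTraceElement[]> entry
            : Thread.getAllStackTraces().entrySet()) {
          System.err.println("Thread: " + entry.getKey().getName());
          for (StackTraceElement frame : entry.getValue()) {
            System.err.println("    at " + frame);
          }
        }
      }
    }
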
+ 12 - 0
hadoop-mapreduce-project/pom.xml

@@ -220,6 +220,18 @@
          </includes>
        </configuration>
      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
+        </configuration>
+      </plugin>
    </plugins>
  </build>


+ 3 - 3
hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java

@@ -52,11 +52,11 @@ public class TestHadoopArchives extends TestCase {
 
 
  {
    ((Log4JLogger)LogFactory.getLog(org.apache.hadoop.security.Groups.class)
-        ).getLogger().setLevel(Level.OFF);
+        ).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger)org.apache.hadoop.ipc.Server.LOG
-        ).getLogger().setLevel(Level.OFF);
+        ).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger)org.apache.hadoop.util.AsyncDiskService.LOG
-        ).getLogger().setLevel(Level.OFF);
+        ).getLogger().setLevel(Level.ERROR);
  }

  private static final String inputDir = "input";

+ 3 - 3
hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java

@@ -61,9 +61,9 @@ import org.junit.Ignore;
 public class TestCopyFiles extends TestCase {
  {
    ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange")
-        ).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
+        ).getLogger().setLevel(Level.ERROR);
+    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ERROR);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger)DistCpV1.LOG).getLogger().setLevel(Level.ALL);
  }
    

Some files were not shown because too many files changed in this diff