
Merge r1332460 through r1334157 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3092@1334158 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 13 years ago
parent
commit
794f2506b9
100 changed files with 1998 additions and 608 deletions
  1. 0 21
      dev-support/test-patch.properties
  2. 93 33
      dev-support/test-patch.sh
  3. 0 21
      hadoop-common-project/dev-support/test-patch.properties
  4. 3 5
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
  5. 8 5
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
  6. 2 4
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
  7. 3 3
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
  8. 39 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  9. 8 6
      hadoop-common-project/hadoop-common/src/main/bin/hadoop
  10. 6 3
      hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/commands_manual.xml
  11. 2 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
  12. 109 32
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  13. 3 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  14. 1 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  15. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FenceMethod.java
  16. 2 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
  17. 34 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  18. 34 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java
  19. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  20. 2 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
  21. 2 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java
  22. 23 32
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
  23. 4 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
  24. 5 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java
  25. 88 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java
  26. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
  27. 6 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/DelegationKey.java
  28. 1 1
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
  29. 9 0
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  30. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
  31. 50 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
  32. 12 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java
  33. 22 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
  34. 25 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableUtils.java
  35. 87 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
  36. 60 45
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
  37. 0 18
      hadoop-common-project/hadoop-common/src/test/resources/test-patch.properties
  38. 0 21
      hadoop-hdfs-project/dev-support/test-patch.properties
  39. 22 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  40. 1 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
  41. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  42. 0 31
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  43. 4 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
  44. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  45. 3 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  46. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
  47. 18 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  48. 3 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
  49. 25 24
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointFaultInjector.java
  50. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
  51. 22 37
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
  52. 4 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
  53. 31 17
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  54. 2 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  55. 7 11
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
  56. 1 28
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java
  57. 19 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
  58. 159 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
  59. 14 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
  60. 31 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
  61. 36 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
  62. 73 21
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
  63. 9 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
  64. 28 0
      hadoop-mapreduce-project/CHANGES.txt
  65. 0 18
      hadoop-mapreduce-project/dev-support/test-patch.properties
  66. 1 5
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
  67. 9 9
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
  68. 2 13
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
  69. 10 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
  70. 1 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
  71. 71 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
  72. 3 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java
  73. 3 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClientGetJob.java
  74. 15 32
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
  75. 10 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
  76. 1 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
  77. 15 5
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java
  78. 0 2
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
  79. 1 2
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/CompositeService.java
  80. 9 0
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java
  81. 4 4
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
  82. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui.min-1.8.16.js.gz
  83. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery.min-1.6.4.js.gz
  84. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_flat_0_aaaaaa_40x100.png
  85. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_flat_75_ffffff_40x100.png
  86. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_glass_55_fbf9ee_1x400.png
  87. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_glass_65_ffffff_1x400.png
  88. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_glass_75_dadada_1x400.png
  89. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_glass_75_e6e6e6_1x400.png
  90. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_glass_95_fef1ec_1x400.png
  91. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_highlight-soft_75_cccccc_1x100.png
  92. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-icons_222222_256x240.png
  93. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-icons_2e83ff_256x240.png
  94. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-icons_454545_256x240.png
  95. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-icons_888888_256x240.png
  96. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-icons_cd0a0a_256x240.png
  97. 566 0
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/jquery-ui.css
  98. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/black-tie/images/ui-bg_diagonals-thick_8_333333_40x40.png
  99. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/black-tie/images/ui-bg_flat_65_ffffff_40x100.png
  100. Binary
      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/black-tie/images/ui-bg_glass_40_111111_1x400.png

+ 0 - 21
dev-support/test-patch.properties

@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The number of acceptable warning for *all* modules
-# Please update the per-module test-patch.properties if you update this file.
-
-OK_RELEASEAUDIT_WARNINGS=0
-OK_FINDBUGS_WARNINGS=0
-OK_JAVADOC_WARNINGS=8

+ 93 - 33
dev-support/test-patch.sh

@@ -240,15 +240,6 @@ setup () {
       cleanupAndExit 0
     fi
   fi
-  . $BASEDIR/dev-support/test-patch.properties
-  ### exit if warnings are NOT defined in the properties file
-  if [ -z "$OK_FINDBUGS_WARNINGS" ] || [[ -z "$OK_JAVADOC_WARNINGS" ]] || [[ -z $OK_RELEASEAUDIT_WARNINGS ]]; then
-    echo "Please define the following properties in test-patch.properties file"
-	 echo  "OK_FINDBUGS_WARNINGS"
-	 echo  "OK_RELEASEAUDIT_WARNINGS"
-	 echo  "OK_JAVADOC_WARNINGS"
-    cleanupAndExit 1
-  fi
   echo ""
   echo ""
   echo "======================================================================"
@@ -389,10 +380,10 @@ checkJavadocWarnings () {
   echo ""
   echo "$MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1"
   if [ -d hadoop-project ]; then
-    (cd hadoop-project; $MVN install)
+    (cd hadoop-project; $MVN install > /dev/null 2>&1)
   fi
   if [ -d hadoop-common-project/hadoop-annotations ]; then  
-    (cd hadoop-common-project/hadoop-annotations; $MVN install)
+    (cd hadoop-common-project/hadoop-annotations; $MVN install > /dev/null 2>&1)
   fi
   $MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1
   javadocWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavadocWarnings.txt | $AWK '/Javadoc Warnings/,EOF' | $GREP warning | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
@@ -400,8 +391,10 @@ checkJavadocWarnings () {
   echo ""
   echo "There appear to be $javadocWarnings javadoc warnings generated by the patched build."
 
+  #There are 6 warnings that are caused by things that are caused by using sun internal APIs.
+  OK_JAVADOC_WARNINGS=6;
   ### if current warnings greater than OK_JAVADOC_WARNINGS
-  if [[ $javadocWarnings -gt $OK_JAVADOC_WARNINGS ]] ; then
+  if [[ $javadocWarnings -ne $OK_JAVADOC_WARNINGS ]] ; then
     JIRA_COMMENT="$JIRA_COMMENT
 
     -1 javadoc.  The javadoc tool appears to have generated `expr $(($javadocWarnings-$OK_JAVADOC_WARNINGS))` warning messages."
@@ -472,8 +465,8 @@ checkReleaseAuditWarnings () {
   echo "======================================================================"
   echo ""
   echo ""
-  echo "$MVN apache-rat:check -D${PROJECT_NAME}PatchProcess 2>&1"
-  $MVN apache-rat:check -D${PROJECT_NAME}PatchProcess 2>&1
+  echo "$MVN apache-rat:check -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchReleaseAuditOutput.txt 2>&1"
+  $MVN apache-rat:check -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchReleaseAuditOutput.txt 2>&1
   find $BASEDIR -name rat.txt | xargs cat > $PATCH_DIR/patchReleaseAuditWarnings.txt
 
   ### Compare trunk and patch release audit warning numbers
@@ -481,12 +474,12 @@ checkReleaseAuditWarnings () {
     patchReleaseAuditWarnings=`$GREP -c '\!?????' $PATCH_DIR/patchReleaseAuditWarnings.txt`
     echo ""
     echo ""
-    echo "There appear to be $OK_RELEASEAUDIT_WARNINGS release audit warnings before the patch and $patchReleaseAuditWarnings release audit warnings after applying the patch."
-    if [[ $patchReleaseAuditWarnings != "" && $OK_RELEASEAUDIT_WARNINGS != "" ]] ; then
-      if [[ $patchReleaseAuditWarnings -gt $OK_RELEASEAUDIT_WARNINGS ]] ; then
+    echo "There appear to be $patchReleaseAuditWarnings release audit warnings after applying the patch."
+    if [[ $patchReleaseAuditWarnings != "" ]] ; then
+      if [[ $patchReleaseAuditWarnings -gt 0 ]] ; then
         JIRA_COMMENT="$JIRA_COMMENT
 
-    -1 release audit.  The applied patch generated $patchReleaseAuditWarnings release audit warnings (more than the trunk's current $OK_RELEASEAUDIT_WARNINGS warnings)."
+    -1 release audit.  The applied patch generated $patchReleaseAuditWarnings release audit warnings."
         $GREP '\!?????' $PATCH_DIR/patchReleaseAuditWarnings.txt > $PATCH_DIR/patchReleaseAuditProblems.txt
         echo "Lines that start with ????? in the release audit report indicate files that do not have an Apache license header." >> $PATCH_DIR/patchReleaseAuditProblems.txt
         JIRA_COMMENT_FOOTER="Release audit warnings: $BUILD_URL/artifact/trunk/patchprocess/patchReleaseAuditProblems.txt
@@ -548,10 +541,21 @@ checkFindbugsWarnings () {
   echo "======================================================================"
   echo ""
   echo ""
-  echo "$MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess" 
-  $MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess < /dev/null
+  
+  modules=$(findModules)
+  rc=0
+  for module in $modules;
+  do
+    cd $module
+    echo "  Running findbugs in $module"
+    module_suffix=`basename ${module}`
+    echo "$MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess < /dev/null > $PATCH_DIR/patchFindBugsOutput${module_suffix}.txt 2>&1" 
+    $MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess < /dev/null > $PATCH_DIR/patchFindBugsOutput${module_suffix}.txt 2>&1
+    (( rc = rc + $? ))
+    cd -
+  done
 
-  if [ $? != 0 ] ; then
+  if [ $rc != 0 ] ; then
     JIRA_COMMENT="$JIRA_COMMENT
 
     -1 findbugs.  The patch appears to cause Findbugs (version ${findbugs_version}) to fail."
@@ -584,11 +588,10 @@ $JIRA_COMMENT_FOOTER"
     fi
   done
 
-  ### if current warnings greater than OK_FINDBUGS_WARNINGS
-  if [[ $findbugsWarnings -gt $OK_FINDBUGS_WARNINGS ]] ; then
+  if [[ $findbugsWarnings -gt 0 ]] ; then
     JIRA_COMMENT="$JIRA_COMMENT
 
-    -1 findbugs.  The patch appears to introduce `expr $(($findbugsWarnings-$OK_FINDBUGS_WARNINGS))` new Findbugs (version ${findbugs_version}) warnings."
+    -1 findbugs.  The patch appears to introduce $findbugsWarnings new Findbugs (version ${findbugs_version}) warnings."
     return 1
   fi
   JIRA_COMMENT="$JIRA_COMMENT
@@ -610,8 +613,8 @@ checkEclipseGeneration () {
   echo ""
   echo ""
 
-  echo "$MVN eclipse:eclipse -D${PROJECT_NAME}PatchProcess"
-  $MVN eclipse:eclipse -D${PROJECT_NAME}PatchProcess
+  echo "$MVN eclipse:eclipse -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchEclipseOutput.txt 2>&1"
+  $MVN eclipse:eclipse -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchEclipseOutput.txt 2>&1
   if [[ $? != 0 ]] ; then
       JIRA_COMMENT="$JIRA_COMMENT
 
@@ -639,16 +642,28 @@ runTests () {
   echo ""
   echo ""
 
-  echo "$MVN clean install -fn -Pnative -D${PROJECT_NAME}PatchProcess"
-  $MVN clean install -fn -Pnative -D${PROJECT_NAME}PatchProcess
-  failed_tests=`find . -name 'TEST*.xml' | xargs $GREP  -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-|                  |g" | sed -e "s|\.xml||g"`
-  # With -fn mvn always exits with a 0 exit code.  Because of this we need to
-  # find the errors instead of using the exit code.  We assume that if the build
-  # failed a -1 is already given for that case
+  failed_tests=""
+  modules=$(findModules)
+  for module in $modules;
+  do
+    cd $module
+    echo "  Running tests in $module"
+    echo "  $MVN clean install -fn -Pnative -D${PROJECT_NAME}PatchProcess"
+    $MVN clean install -fn -Pnative -D${PROJECT_NAME}PatchProcess
+    module_failed_tests=`find . -name 'TEST*.xml' | xargs $GREP  -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-|                  |g" | sed -e "s|\.xml||g"`
+    # With -fn mvn always exits with a 0 exit code.  Because of this we need to
+    # find the errors instead of using the exit code.  We assume that if the build
+    # failed a -1 is already given for that case
+    if [[ -n "$module_failed_tests" ]] ; then
+      failed_tests="${failed_tests}
+${module_failed_tests}"
+    fi
+    cd -
+  done
   if [[ -n "$failed_tests" ]] ; then
     JIRA_COMMENT="$JIRA_COMMENT
 
-    -1 core tests.  The patch failed these unit tests:
+    -1 core tests.  The patch failed these unit tests in $modules:
 $failed_tests"
     return 1
   fi
@@ -658,6 +673,51 @@ $failed_tests"
   return 0
 }
 
+###############################################################################
+# Find the maven module containing the given file.
+findModule (){
+ dir=`dirname $1`
+ while [ 1 ]
+ do
+  if [ -f "$dir/pom.xml" ]
+  then
+    echo $dir
+    return
+  else
+    dir=`dirname $dir`
+  fi
+ done
+}
+
+findModules () {
+  # Come up with a list of changed files into $TMP
+  TMP=/tmp/tmp.paths.$$
+  $GREP '^+++\|^---' $PATCH_DIR/patch | cut -c '5-' | $GREP -v /dev/null | sort | uniq > $TMP
+  
+  # if all of the lines start with a/ or b/, then this is a git patch that
+  # was generated without --no-prefix
+  if ! $GREP -qv '^a/\|^b/' $TMP ; then
+    sed -i -e 's,^[ab]/,,' $TMP
+  fi
+  
+  # Now find all the modules that were changed
+  TMP_MODULES=/tmp/tmp.modules.$$
+  for file in $(cut -f 1 $TMP | sort | uniq); do
+    echo $(findModule $file) >> $TMP_MODULES
+  done
+  rm $TMP
+  
+  # Filter out modules without code 
+  CHANGED_MODULES=""
+  for module in $(cat $TMP_MODULES | sort | uniq); do
+    $GREP "<packaging>pom</packaging>" $module/pom.xml > /dev/null
+    if [ "$?" != 0 ]; then
+      CHANGED_MODULES="$CHANGED_MODULES $module"
+    fi
+  done
+  rm $TMP_MODULES
+  echo $CHANGED_MODULES
+}
 ###############################################################################
 ### Run the test-contrib target
 runContribTests () {

+ 0 - 21
hadoop-common-project/dev-support/test-patch.properties

@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The number of acceptable warning for this module
-# Please update the root test-patch.properties if you update this file.
-
-OK_RELEASEAUDIT_WARNINGS=0
-OK_FINDBUGS_WARNINGS=0
-OK_JAVADOC_WARNINGS=13

+ 3 - 5
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java

@@ -26,7 +26,6 @@ import javax.security.auth.login.Configuration;
 import javax.security.auth.login.LoginContext;
 import javax.security.auth.login.LoginException;
 import java.io.IOException;
-import java.lang.reflect.Field;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.security.AccessControlContext;
@@ -196,11 +195,10 @@ public class KerberosAuthenticator implements Authenticator {
           try {
             GSSManager gssManager = GSSManager.getInstance();
             String servicePrincipal = "HTTP/" + KerberosAuthenticator.this.url.getHost();
-            
+            Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL");
             GSSName serviceName = gssManager.createName(servicePrincipal,
-                                                        GSSName.NT_HOSTBASED_SERVICE);
-            Oid oid = KerberosUtil.getOidClassInstance(servicePrincipal, 
-                gssManager);
+                                                        oid);
+            oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
             gssContext = gssManager.createContext(serviceName, oid, null,
                                                   GSSContext.DEFAULT_LIFETIME);
             gssContext.requestCredDeleg(true);

+ 8 - 5
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java

@@ -327,6 +327,8 @@ public class AuthenticationFilter implements Filter {
   @Override
   public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain)
       throws IOException, ServletException {
+    boolean unauthorizedResponse = true;
+    String unauthorizedMsg = "";
     HttpServletRequest httpRequest = (HttpServletRequest) request;
     HttpServletResponse httpResponse = (HttpServletResponse) response;
     try {
@@ -350,6 +352,7 @@ public class AuthenticationFilter implements Filter {
         newToken = true;
       }
       if (token != null) {
+        unauthorizedResponse = false;
         if (LOG.isDebugEnabled()) {
           LOG.debug("Request [{}] user [{}] authenticated", getRequestURL(httpRequest), token.getUserName());
         }
@@ -378,17 +381,17 @@ public class AuthenticationFilter implements Filter {
         }
         filterChain.doFilter(httpRequest, httpResponse);
       }
-      else {
-        throw new AuthenticationException("Missing AuthenticationToken");
-      }
     } catch (AuthenticationException ex) {
+      unauthorizedMsg = ex.toString();
+      LOG.warn("Authentication exception: " + ex.getMessage(), ex);
+    }
+    if (unauthorizedResponse) {
       if (!httpResponse.isCommitted()) {
         Cookie cookie = createCookie("");
         cookie.setMaxAge(0);
         httpResponse.addCookie(cookie);
-        httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED, ex.getMessage());
+        httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED, unauthorizedMsg);
       }
-      LOG.warn("Authentication exception: " + ex.getMessage(), ex);
     }
   }
 

+ 2 - 4
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java

@@ -22,7 +22,6 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 
 import org.ietf.jgss.GSSException;
-import org.ietf.jgss.GSSManager;
 import org.ietf.jgss.Oid;
 
 public class KerberosUtil {
@@ -34,8 +33,7 @@ public class KerberosUtil {
       : "com.sun.security.auth.module.Krb5LoginModule";
   }
   
-  public static Oid getOidClassInstance(String servicePrincipal,
-      GSSManager gssManager) 
+  public static Oid getOidInstance(String oidName) 
       throws ClassNotFoundException, GSSException, NoSuchFieldException,
       IllegalAccessException {
     Class<?> oidClass;
@@ -44,7 +42,7 @@ public class KerberosUtil {
     } else {
       oidClass = Class.forName("sun.security.jgss.GSSUtil");
     }
-    Field oidField = oidClass.getDeclaredField("GSS_KRB5_MECH_OID");
+    Field oidField = oidClass.getDeclaredField(oidName);
     return (Oid)oidField.get(oidClass);
   }
 

+ 3 - 3
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java

@@ -145,10 +145,10 @@ public class TestKerberosAuthenticationHandler extends TestCase {
         GSSContext gssContext = null;
         try {
           String servicePrincipal = KerberosTestUtils.getServerPrincipal();
+          Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL");
           GSSName serviceName = gssManager.createName(servicePrincipal,
-              GSSName.NT_HOSTBASED_SERVICE);
-          Oid oid = KerberosUtil.getOidClassInstance(servicePrincipal, 
-              gssManager);
+              oid);
+          oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
           gssContext = gssManager.createContext(serviceName, oid, null,
                                                   GSSContext.DEFAULT_LIFETIME);
           gssContext.requestCredDeleg(true);

+ 39 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -65,6 +65,8 @@ Trunk (unreleased changes)
 
     HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
 
+    HADOOP-8308. Support cross-project Jenkins builds. (tomwhite)
+
   BUG FIXES
 
     HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
@@ -124,6 +126,9 @@ Trunk (unreleased changes)
     HADOOP-8312. testpatch.sh should provide a simpler way to see which
     warnings changed (bobby)
 
+    HADOOP-8339. jenkins complaining about 16 javadoc warnings 
+    (Tom White and Robert Evans via tgraves)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -158,6 +163,9 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8210. Common side of HDFS-3148: The client should be able
     to use multiple local interfaces for data transfer. (eli)
 
+    HADOOP-8343. Allow configuration of authorization for JmxJsonServlet and 
+    MetricsServlet (tucu)
+
   IMPROVEMENTS
 
     HADOOP-7524. Change RPC to allow multiple protocols including multuple
@@ -274,6 +282,20 @@ Release 2.0.0 - UNRELEASED
 
     HADOOP-7549. Use JDK ServiceLoader mechanism to find FileSystem implementations. (tucu)
 
+    HADOOP-8185. Update namenode -format documentation and add -nonInteractive
+    and -force. (Arpit Gupta via atm)
+
+    HADOOP-8214. make hadoop script recognize a full set of deprecated commands (rvs via tucu)
+
+    HADOOP-8347. Hadoop Common logs misspell 'successful'.
+    (Philip Zeyliger via eli)
+
+    HADOOP-8350. Improve NetUtils.getInputStream to return a stream which has
+    a tunable timeout. (todd)
+
+    HADOOP-8356. FileSystem service loading mechanism should print the FileSystem 
+    impl it is failing to load (tucu)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -390,6 +412,17 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8325. Add a ShutdownHookManager to be used by different
     components instead of the JVM shutdownhook (tucu)
 
+    HADOOP-8275. Range check DelegationKey length.
+    (Colin Patrick McCabe via eli)
+
+    HADOOP-8342. HDFS command fails with exception following merge of 
+    HADOOP-8325 (tucu)
+
+    HADOOP-8346. Makes oid changes to make SPNEGO work. Was broken due
+    to fixes introduced by the IBM JDK compatibility patch. (ddas)
+
+    HADOOP-8355. SPNEGO filter throws/logs exception when authentication fails (tucu)
+
   BREAKDOWN OF HADOOP-7454 SUBTASKS
 
     HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
@@ -442,6 +475,12 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8116. RetriableCommand is using RetryPolicy incorrectly after
     HADOOP-7896. (atm)
 
+    HADOOP-8317. Update maven-assembly-plugin to 2.3 - fix build on FreeBSD
+    (Radim Kolar via bobby)
+
+    HADOOP-8172. Configuration no longer sets all keys in a deprecated key 
+    list. (Anupam Seth via bobby)
+
 Release 0.23.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 8 - 6
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -50,15 +50,16 @@ fi
 COMMAND=$1
 case $COMMAND in
   #hdfs commands
-  namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer|fetchdt)
+  namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer|fetchdt|oiv|dfsgroups)
     echo "DEPRECATED: Use of this script to execute hdfs command is deprecated." 1>&2
     echo "Instead use the hdfs command for it." 1>&2
     echo "" 1>&2
     #try to locate hdfs and if present, delegate to it.  
+    shift
     if [ -f "${HADOOP_HDFS_HOME}"/bin/hdfs ]; then
-      exec "${HADOOP_HDFS_HOME}"/bin/hdfs $*
+      exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups}  $*
     elif [ -f "${HADOOP_PREFIX}"/bin/hdfs ]; then
-      exec "${HADOOP_PREFIX}"/bin/hdfs $*
+      exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} $*
     else
       echo "HADOOP_HDFS_HOME not found!"
       exit 1
@@ -66,15 +67,16 @@ case $COMMAND in
     ;;
 
   #mapred commands for backwards compatibility
-  pipes|job|queue)
+  pipes|job|queue|mrgroups|mradmin|jobtracker|tasktracker)
     echo "DEPRECATED: Use of this script to execute mapred command is deprecated." 1>&2
     echo "Instead use the mapred command for it." 1>&2
     echo "" 1>&2
     #try to locate mapred and if present, delegate to it.
+    shift
     if [ -f "${HADOOP_MAPRED_HOME}"/bin/mapred ]; then
-      exec "${HADOOP_MAPRED_HOME}"/bin/mapred $*
+      exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} $*
     elif [ -f "${HADOOP_PREFIX}"/bin/mapred ]; then
-      exec "${HADOOP_PREFIX}"/bin/mapred $*
+      exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} $*
     else
       echo "HADOOP_MAPRED_HOME not found!"
       exit 1

+ 6 - 3
hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/commands_manual.xml

@@ -696,7 +696,7 @@
 					<a href="http://hadoop.apache.org/hdfs/docs/current/hdfs_user_guide.html#Upgrade+and+Rollback">Upgrade and Rollback</a>.
 				</p>
 				<p>
-					<code>Usage: hadoop namenode [-format] | [-upgrade] | [-rollback] | [-finalize] | [-importCheckpoint] | [-checkpoint] | [-backup]</code>
+					<code>Usage: hadoop namenode [-format [-force] [-nonInteractive] [-clusterid someid]] | [-upgrade] | [-rollback] | [-finalize] | [-importCheckpoint] | [-checkpoint] | [-backup]</code>
 				</p>
 				<table>
 			          <tr><th> COMMAND_OPTION </th><th> Description </th></tr>
@@ -714,8 +714,11 @@
                   <td>Start namenode in backup role, maintaining an up-to-date in-memory copy of the namespace and creating periodic checkpoints.</td>
                 </tr>
 			           <tr>
-			          	<td><code>-format</code></td>
-			            <td>Formats the namenode. It starts the namenode, formats it and then shut it down.</td>
+			          	<td><code>-format [-force] [-nonInteractive] [-clusterid someid]</code></td>
+			            <td>Formats the namenode. It starts the namenode, formats it and then shuts it down. User will be prompted before formatting any non empty name directories in the local filesystem.<br/>
+                                    -nonInteractive: User will not be prompted for input if non empty name directories exist in the local filesystem and the format will fail.<br/>
+                                    -force: Formats the namenode and the user will NOT be prompted to confirm formatting of the name directories in the local filesystem. If -nonInteractive option is specified it will be ignored.<br/>
+                                    -clusterid: Associates the namenode with the id specified. When formatting federated namenodes use this option to make sure all namenodes are associated with the same id.</td>
 			           </tr>
 			           <tr>
 			          	<td><code>-upgrade</code></td>
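
As an aside to the updated -format entry above, the documented options combine roughly as follows; the cluster id is only an illustrative value:

hadoop namenode -format                          # prompts before formatting non-empty name directories
hadoop namenode -format -nonInteractive          # never prompts; fails if non-empty name directories exist
hadoop namenode -format -force                   # formats without prompting (takes precedence over -nonInteractive)
hadoop namenode -format -clusterid myClusterID   # associates the namenode with the given id, e.g. for federation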

+ 2 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.conf;
 
 import java.io.IOException;
-import java.io.OutputStreamWriter;
 import java.io.Writer;
 
 import javax.servlet.ServletException;
@@ -57,9 +56,8 @@ public class ConfServlet extends HttpServlet {
   public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {
 
-    // Do the authorization
-    if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
-        response)) {
+    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+                                                   request, response)) {
       return;
     }
 

+ 109 - 32
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -33,6 +33,7 @@ import java.io.Writer;
 import java.net.InetSocketAddress;
 import java.net.URL;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Enumeration;
@@ -269,10 +270,18 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * This is to be used only by the developers in order to add deprecation of
    * keys, and attempts to call this method after loading resources once,
    * would lead to <tt>UnsupportedOperationException</tt>
+   * 
+   * If a key is deprecated in favor of multiple keys, they are all treated as 
+   * aliases of each other, and setting any one of them resets all the others 
+   * to the new value.
+   * 
    * @param key
    * @param newKeys
    * @param customMessage
+   * @deprecated use {@link addDeprecation(String key, String newKey,
+      String customMessage)} instead
    */
+  @Deprecated
   public synchronized static void addDeprecation(String key, String[] newKeys,
       String customMessage) {
     if (key == null || key.length() == 0 ||
@@ -288,6 +297,22 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       }
     }
   }
+  
+  /**
+   * Adds the deprecated key to the deprecation map.
+   * It does not override any existing entries in the deprecation map.
+   * This is to be used only by the developers in order to add deprecation of
+   * keys, and attempts to call this method after loading resources once,
+   * would lead to <tt>UnsupportedOperationException</tt>
+   * 
+   * @param key
+   * @param newKey
+   * @param customMessage
+   */
+  public synchronized static void addDeprecation(String key, String newKey,
+	      String customMessage) {
+	  addDeprecation(key, new String[] {newKey}, customMessage);
+  }
 
   /**
    * Adds the deprecated key to the deprecation map when no custom message
@@ -297,13 +322,34 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * keys, and attempts to call this method after loading resources once,
    * would lead to <tt>UnsupportedOperationException</tt>
    * 
+   * If a key is deprecated in favor of multiple keys, they are all treated as 
+   * aliases of each other, and setting any one of them resets all the others 
+   * to the new value.
+   * 
    * @param key Key that is to be deprecated
    * @param newKeys list of keys that take up the values of deprecated key
+   * @deprecated use {@link addDeprecation(String key, String newKey)} instead
    */
+  @Deprecated
   public synchronized static void addDeprecation(String key, String[] newKeys) {
     addDeprecation(key, newKeys, null);
   }
   
+  /**
+   * Adds the deprecated key to the deprecation map when no custom message
+   * is provided.
+   * It does not override any existing entries in the deprecation map.
+   * This is to be used only by the developers in order to add deprecation of
+   * keys, and attempts to call this method after loading resources once,
+   * would lead to <tt>UnsupportedOperationException</tt>
+   * 
+   * @param key Key that is to be deprecated
+   * @param newKey key that takes up the value of deprecated key
+   */
+  public synchronized static void addDeprecation(String key, String newKey) {
+	addDeprecation(key, new String[] {newKey}, null);
+  }
+  
   /**
    * checks whether the given <code>key</code> is deprecated.
    * 
@@ -322,16 +368,26 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @param name property name.
    * @return alternate name.
    */
-  private String getAlternateName(String name) {
-    String altName;
+  private String[] getAlternateNames(String name) {
+    String oldName, altNames[] = null;
     DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
-    if (keyInfo != null) {
-      altName = (keyInfo.newKeys.length > 0) ? keyInfo.newKeys[0] : null;
-    }
-    else {
-      altName = reverseDeprecatedKeyMap.get(name);
+    if (keyInfo == null) {
+      altNames = (reverseDeprecatedKeyMap.get(name) != null ) ? 
+        new String [] {reverseDeprecatedKeyMap.get(name)} : null;
+      if(altNames != null && altNames.length > 0) {
+    	//To help look for other new configs for this deprecated config
+    	keyInfo = deprecatedKeyMap.get(altNames[0]);
+      }      
+    } 
+    if(keyInfo != null && keyInfo.newKeys.length > 0) {
+      List<String> list = new ArrayList<String>(); 
+      if(altNames != null) {
+    	  list.addAll(Arrays.asList(altNames));
+      }
+      list.addAll(Arrays.asList(keyInfo.newKeys));
+      altNames = list.toArray(new String[list.size()]);
     }
-    return altName;
+    return altNames;
   }
 
   /**
@@ -346,24 +402,29 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @return the first property in the list of properties mapping
    *         the <code>name</code> or the <code>name</code> itself.
    */
-  private String handleDeprecation(String name) {
-    if (isDeprecated(name)) {
+  private String[] handleDeprecation(String name) {
+    ArrayList<String > names = new ArrayList<String>();
+	if (isDeprecated(name)) {
       DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
       warnOnceIfDeprecated(name);
       for (String newKey : keyInfo.newKeys) {
         if(newKey != null) {
-          name = newKey;
-          break;
+          names.add(newKey);
         }
       }
     }
-    String deprecatedKey = reverseDeprecatedKeyMap.get(name);
-    if (deprecatedKey != null && !getOverlay().containsKey(name) &&
-        getOverlay().containsKey(deprecatedKey)) {
-      getProps().setProperty(name, getOverlay().getProperty(deprecatedKey));
-      getOverlay().setProperty(name, getOverlay().getProperty(deprecatedKey));
+    if(names.size() == 0) {
+    	names.add(name);
     }
-    return name;
+    for(String n : names) {
+	  String deprecatedKey = reverseDeprecatedKeyMap.get(n);
+	  if (deprecatedKey != null && !getOverlay().containsKey(n) &&
+	      getOverlay().containsKey(deprecatedKey)) {
+	    getProps().setProperty(n, getOverlay().getProperty(deprecatedKey));
+	    getOverlay().setProperty(n, getOverlay().getProperty(deprecatedKey));
+	  }
+    }
+    return names.toArray(new String[names.size()]);
   }
  
   private void handleDeprecation() {
@@ -595,8 +656,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *         or null if no such property exists.
    */
   public String get(String name) {
-    name = handleDeprecation(name);
-    return substituteVars(getProps().getProperty(name));
+    String[] names = handleDeprecation(name);
+    String result = null;
+    for(String n : names) {
+      result = substituteVars(getProps().getProperty(n));
+    }
+    return result;
   }
   
   /**
@@ -633,8 +698,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *         its replacing property and null if no such property exists.
    */
   public String getRaw(String name) {
-    name = handleDeprecation(name);
-    return getProps().getProperty(name);
+    String[] names = handleDeprecation(name);
+    String result = null;
+    for(String n : names) {
+      result = getProps().getProperty(n);
+    }
+    return result;
   }
 
   /** 
@@ -652,10 +721,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     getOverlay().setProperty(name, value);
     getProps().setProperty(name, value);
     updatingResource.put(name, UNKNOWN_RESOURCE);
-    String altName = getAlternateName(name);
-    if (altName != null) {
-      getOverlay().setProperty(altName, value);
-      getProps().setProperty(altName, value);
+    String[] altNames = getAlternateNames(name);
+    if (altNames != null && altNames.length > 0) {
+      for(String altName : altNames) {
+    	getOverlay().setProperty(altName, value);
+        getProps().setProperty(altName, value);
+      }
     }
     warnOnceIfDeprecated(name);
   }
@@ -671,12 +742,14 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * Unset a previously set property.
    */
   public synchronized void unset(String name) {
-    String altName = getAlternateName(name);
+    String[] altNames = getAlternateNames(name);
     getOverlay().remove(name);
     getProps().remove(name);
-    if (altName !=null) {
-      getOverlay().remove(altName);
-       getProps().remove(altName);
+    if (altNames !=null && altNames.length > 0) {
+      for(String altName : altNames) {
+    	getOverlay().remove(altName);
+    	getProps().remove(altName);
+      }
     }
   }
 
@@ -711,8 +784,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *         doesn't exist.                    
    */
   public String get(String name, String defaultValue) {
-    name = handleDeprecation(name);
-    return substituteVars(getProps().getProperty(name, defaultValue));
+    String[] names = handleDeprecation(name);
+    String result = null;
+    for(String n : names) {
+      result = substituteVars(getProps().getProperty(n, defaultValue));
+    }
+    return result;
   }
     
   /** 
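
To illustrate the new single-key addDeprecation overload and the alias semantics described in the javadoc above, here is a minimal, self-contained sketch; the key names are invented for the example:

import org.apache.hadoop.conf.Configuration;

public class DeprecationExample {
  public static void main(String[] args) {
    // Register the mapping once, before any configuration resources are loaded.
    Configuration.addDeprecation("old.example.key", "new.example.key",
        "old.example.key is deprecated; use new.example.key instead");

    Configuration conf = new Configuration(false);
    conf.set("old.example.key", "value1");
    // Deprecated and replacement keys act as aliases of each other:
    System.out.println(conf.get("new.example.key"));   // value1
    conf.set("new.example.key", "value2");
    System.out.println(conf.get("old.example.key"));   // value2
  }
}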

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -228,6 +228,9 @@ public class CommonConfigurationKeysPublic {
   public static final String HADOOP_SECURITY_AUTHORIZATION =
     "hadoop.security.authorization";
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN =
+    "hadoop.security.instrumentation.requires.admin";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  HADOOP_SECURITY_SERVICE_USER_NAME_KEY =
     "hadoop.security.service.user.name.key";
 }

+ 1 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -199,7 +199,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * @return the protocol scheme for the FileSystem.
    */
   public String getScheme() {
-    throw new UnsupportedOperationException("Not implemented by  the FileSystem implementation");
+    throw new UnsupportedOperationException("Not implemented by the " + getClass().getSimpleName() + " FileSystem implementation");
   }
 
   /** Returns a URI whose scheme and authority identify this FileSystem.*/
@@ -2198,10 +2198,7 @@ public abstract class FileSystem extends Configured implements Closeable {
       if (map.containsKey(key) && fs == map.get(key)) {
         map.remove(key);
         toAutoClose.remove(key);
-        if (map.isEmpty()) {
-          ShutdownHookManager.get().removeShutdownHook(clientFinalizer);
         }
-      }
     }
 
     synchronized void closeAll() throws IOException {

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FenceMethod.java

@@ -52,7 +52,7 @@ public interface FenceMethod {
   
   /**
    * Attempt to fence the target node.
-   * @param serviceAddr the address (host:ipcport) of the service to fence
+   * @param target the target of the service to fence
    * @param args the configured arguments, which were checked at startup by
    *             {@link #checkArgs(String)}
    * @return true if fencing was successful, false if unsuccessful or

+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java

@@ -117,12 +117,13 @@ public interface HAServiceProtocol {
   /**
    * Return the current status of the service. The status indicates
    * the current <em>state</em> (e.g ACTIVE/STANDBY) as well as
-   * some additional information. {@see HAServiceStatus}
+   * some additional information.
    * 
    * @throws AccessControlException
    *           if access is denied.
    * @throws IOException
    *           if other errors happen
+   * @see HAServiceStatus
    */
   public HAServiceStatus getServiceStatus() throws AccessControlException,
                                                    IOException;

+ 34 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -779,6 +779,37 @@ public class HttpServer implements FilterContainer {
         : "Inactive HttpServer";
   }
 
+  /**
+   * Checks the user has privileges to access to instrumentation servlets.
+   * <p/>
+   * If <code>hadoop.security.instrumentation.requires.admin</code> is set to FALSE
+   * (default value) it always returns TRUE.
+   * <p/>
+   * If <code>hadoop.security.instrumentation.requires.admin</code> is set to TRUE
+   * it will check that if the current user is in the admin ACLS. If the user is
+   * in the admin ACLs it returns TRUE, otherwise it returns FALSE.
+   *
+   * @param servletContext the servlet context.
+   * @param request the servlet request.
+   * @param response the servlet response.
+   * @return TRUE/FALSE based on the logic decribed above.
+   */
+  public static boolean isInstrumentationAccessAllowed(
+    ServletContext servletContext, HttpServletRequest request,
+    HttpServletResponse response) throws IOException {
+    Configuration conf =
+      (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
+
+    boolean access = true;
+    boolean adminAccess = conf.getBoolean(
+      CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
+      false);
+    if (adminAccess) {
+      access = hasAdministratorAccess(servletContext, request, response);
+    }
+    return access;
+  }
+
   /**
    * Does the user sending the HttpServletRequest has the administrator ACLs? If
    * it isn't the case, response will be modified to send an error to the user.
@@ -794,7 +825,6 @@ public class HttpServer implements FilterContainer {
       HttpServletResponse response) throws IOException {
     Configuration conf =
         (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
-
     // If there is no authorization, anybody has administrator access.
     if (!conf.getBoolean(
         CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
@@ -834,12 +864,11 @@ public class HttpServer implements FilterContainer {
     @Override
     public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {
-      response.setContentType("text/plain; charset=UTF-8");
-      // Do the authorization
-      if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
-          response)) {
+      if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+                                                     request, response)) {
         return;
       }
+      response.setContentType("text/plain; charset=UTF-8");
       PrintWriter out = response.getWriter();
       ReflectionUtils.printThreadInfo(out, "");
       out.close();
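
The new isInstrumentationAccessAllowed check is what this patch applies to ConfServlet, JMXJsonServlet, MetricsServlet and the stack servlet shown above; a custom instrumentation servlet would follow the same pattern. A hedged sketch (the servlet itself is hypothetical):

import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.http.HttpServer;

public class MyMetricsServlet extends HttpServlet {
  @Override
  public void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    // True unless hadoop.security.instrumentation.requires.admin is set to true
    // and the caller is not in the admin ACL; on denial the response has already
    // been populated with an error, so the servlet simply returns.
    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) {
      return;
    }
    response.setContentType("text/plain; charset=UTF-8");
    response.getWriter().println("instrumentation output goes here");
  }
}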

+ 34 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java

@@ -326,9 +326,41 @@ public final class WritableUtils  {
    * @return deserialized integer from stream.
    */
   public static int readVInt(DataInput stream) throws IOException {
-    return (int) readVLong(stream);
+    long n = readVLong(stream);
+    if ((n > Integer.MAX_VALUE) || (n < Integer.MIN_VALUE)) {
+      throw new IOException("value too long to fit in integer");
+    }
+    return (int)n;
+  }
+
+  /**
+   * Reads an integer from the input stream and returns it.
+   *
+   * This function validates that the integer is between [lower, upper],
+   * inclusive.
+   *
+   * @param stream Binary input stream
+   * @throws java.io.IOException
+   * @return deserialized integer from stream
+   */
+  public static int readVIntInRange(DataInput stream, int lower, int upper)
+      throws IOException {
+    long n = readVLong(stream);
+    if (n < lower) {
+      if (lower == 0) {
+        throw new IOException("expected non-negative integer, got " + n);
+      } else {
+        throw new IOException("expected integer greater than or equal to " +
+            lower + ", got " + n);
+      }
+    }
+    if (n > upper) {
+      throw new IOException("expected integer less or equal to " + upper +
+          ", got " + n);
+    }
+    return (int)n;
   }
- 
+
   /**
    * Given the first byte of a vint/vlong, determine the sign
    * @param value the first byte
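
A small round-trip sketch of the new readVIntInRange (and the stricter readVInt), using arbitrary example values:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;

public class VIntRangeExample {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    WritableUtils.writeVInt(out, 42);

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    // Throws IOException if the decoded value is outside [0, 100].
    int n = WritableUtils.readVIntInRange(in, 0, 100);
    System.out.println(n);   // 42
  }
}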

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -217,7 +217,7 @@ public abstract class Server {
   public static final Log AUDITLOG = 
     LogFactory.getLog("SecurityLogger."+Server.class.getName());
   private static final String AUTH_FAILED_FOR = "Auth failed for ";
-  private static final String AUTH_SUCCESSFULL_FOR = "Auth successfull for ";
+  private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for ";
   
   private static final ThreadLocal<Server> SERVER = new ThreadLocal<Server>();
 
@@ -1234,7 +1234,7 @@ public abstract class Server {
             LOG.debug("SASL server successfully authenticated client: " + user);
           }
           rpcMetrics.incrAuthenticationSuccesses();
-          AUDITLOG.info(AUTH_SUCCESSFULL_FOR + user);
+          AUDITLOG.info(AUTH_SUCCESSFUL_FOR + user);
           saslContextEstablished = true;
         }
       } else {
@@ -1776,7 +1776,7 @@ public abstract class Server {
    * from configuration. Otherwise the configuration will be picked up.
    * 
    * If rpcRequestClass is null then the rpcRequestClass must have been 
-   * registered via {@link #registerProtocolEngine(RpcPayloadHeader.RpcKind,
+   * registered via {@link #registerProtocolEngine(RPC.RpcKind,
    *  Class, RPC.RpcInvoker)}
    * This parameter has been retained for compatibility with existing tests
    * and usage.
@@ -1990,7 +1990,7 @@ public abstract class Server {
   
   /** 
    * Called for each call. 
-   * @deprecated Use  {@link #call(RpcPayloadHeader.RpcKind, String,
+   * @deprecated Use  {@link #call(RPC.RpcKind, String,
    *  Writable, long)} instead
    */
   @Deprecated

+ 2 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java

@@ -148,9 +148,8 @@ public class JMXJsonServlet extends HttpServlet {
   @Override
   public void doGet(HttpServletRequest request, HttpServletResponse response) {
     try {
-      // Do the authorization
-      if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
-          response)) {
+      if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+                                                     request, response)) {
         return;
       }
       JsonGenerator jg = null;

+ 2 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java

@@ -106,9 +106,8 @@ public class MetricsServlet extends HttpServlet {
   public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {
 
-    // Do the authorization
-    if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
-        response)) {
+    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+                                                   request, response)) {
       return;
     }
 

+ 23 - 32
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java

@@ -375,53 +375,44 @@ public class NetUtils {
   }
   
   /**
-   * Same as getInputStream(socket, socket.getSoTimeout()).<br><br>
+   * Same as <code>getInputStream(socket, socket.getSoTimeout()).</code>
+   * <br><br>
    * 
-   * From documentation for {@link #getInputStream(Socket, long)}:<br>
-   * Returns InputStream for the socket. If the socket has an associated
-   * SocketChannel then it returns a 
-   * {@link SocketInputStream} with the given timeout. If the socket does not
-   * have a channel, {@link Socket#getInputStream()} is returned. In the later
-   * case, the timeout argument is ignored and the timeout set with 
-   * {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
-   *
-   * Any socket created using socket factories returned by {@link NetUtils},
-   * must use this interface instead of {@link Socket#getInputStream()}.
-   *     
    * @see #getInputStream(Socket, long)
-   * 
-   * @param socket
-   * @return InputStream for reading from the socket.
-   * @throws IOException
    */
-  public static InputStream getInputStream(Socket socket) 
+  public static SocketInputWrapper getInputStream(Socket socket) 
                                            throws IOException {
     return getInputStream(socket, socket.getSoTimeout());
   }
-  
+
   /**
-   * Returns InputStream for the socket. If the socket has an associated
-   * SocketChannel then it returns a 
-   * {@link SocketInputStream} with the given timeout. If the socket does not
-   * have a channel, {@link Socket#getInputStream()} is returned. In the later
-   * case, the timeout argument is ignored and the timeout set with 
-   * {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+   * Return a {@link SocketInputWrapper} for the socket and set the given
+   * timeout. If the socket does not have an associated channel, then its socket
+   * timeout will be set to the specified value. Otherwise, a
+   * {@link SocketInputStream} will be created which reads with the configured
+   * timeout.
    * 
-   * Any socket created using socket factories returned by {@link NetUtils},
+   * Any socket created using socket factories returned by {@link NetUtils},
    * must use this interface instead of {@link Socket#getInputStream()}.
-   *     
+   * 
+   * In general, this should be called only once on each socket: see the note
+   * in {@link SocketInputWrapper#setTimeout(long)} for more information.
+   *
    * @see Socket#getChannel()
    * 
    * @param socket
-   * @param timeout timeout in milliseconds. This may not always apply. zero
-   *        for waiting as long as necessary.
-   * @return InputStream for reading from the socket.
+   * @param timeout timeout in milliseconds. zero for waiting as
+   *                long as necessary.
+   * @return SocketInputWrapper for reading from the socket.
    * @throws IOException
    */
-  public static InputStream getInputStream(Socket socket, long timeout) 
+  public static SocketInputWrapper getInputStream(Socket socket, long timeout) 
                                            throws IOException {
-    return (socket.getChannel() == null) ? 
-          socket.getInputStream() : new SocketInputStream(socket, timeout);
+    InputStream stm = (socket.getChannel() == null) ? 
+          socket.getInputStream() : new SocketInputStream(socket);
+    SocketInputWrapper w = new SocketInputWrapper(socket, stm);
+    w.setTimeout(timeout);
+    return w;
   }
   
   /**

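A minimal sketch of the call pattern described in the javadoc above (conf, addr and the timeout values are hypothetical): one wrapper per socket, with later timeout changes going through the wrapper rather than the socket:

    Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
    NetUtils.connect(s, addr, 10000);
    SocketInputWrapper in = NetUtils.getInputStream(s, 10000);
    in.setTimeout(5000);   // when the socket has a channel, this affects only this stream
    DataInputStream dis = new DataInputStream(in);
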
+ 4 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java

@@ -247,6 +247,10 @@ abstract class SocketIOWithTimeout {
                                                               ops)); 
     }
   }
+
+  public void setTimeout(long timeoutMs) {
+    this.timeout = timeoutMs;
+  }
     
   private static String timeoutExceptionString(SelectableChannel channel,
                                                long timeout, int ops) {

+ 5 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java

@@ -28,9 +28,6 @@ import java.nio.channels.ReadableByteChannel;
 import java.nio.channels.SelectableChannel;
 import java.nio.channels.SelectionKey;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
 /**
  * This implements an input stream that can have a timeout while reading.
  * This sets non-blocking flag on the socket channel.
@@ -40,9 +37,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  * IllegalBlockingModeException. 
  * Please use {@link SocketOutputStream} for writing.
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-@InterfaceStability.Unstable
-public class SocketInputStream extends InputStream
+class SocketInputStream extends InputStream
                                implements ReadableByteChannel {
 
   private Reader reader;
@@ -171,4 +166,8 @@ public class SocketInputStream extends InputStream
   public void waitForReadable() throws IOException {
     reader.waitForIO(SelectionKey.OP_READ);
   }
+
+  public void setTimeout(long timeoutMs) {
+    reader.setTimeout(timeoutMs);
+  }
 }

+ 88 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java

@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import java.io.FilterInputStream;
+
+import java.io.InputStream;
+import java.net.Socket;
+import java.net.SocketException;
+import java.nio.channels.ReadableByteChannel;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * A wrapper stream around a socket which allows setting of its timeout. If the
+ * socket has a channel, this uses non-blocking IO via the package-private
+ * {@link SocketInputStream} implementation. Otherwise, timeouts are managed by
+ * setting the underlying socket timeout itself.
+ */
+@InterfaceAudience.LimitedPrivate("HDFS")
+@InterfaceStability.Unstable
+public class SocketInputWrapper extends FilterInputStream {
+  private final Socket socket;
+  private final boolean hasChannel;
+
+  SocketInputWrapper(Socket s, InputStream is) {
+    super(is);
+    this.socket = s;
+    this.hasChannel = s.getChannel() != null;
+    if (hasChannel) {
+      Preconditions.checkArgument(is instanceof SocketInputStream,
+          "Expected a SocketInputStream when there is a channel. " +
+          "Got: %s", is);
+    }
+  }
+
+  /**
+   * Set the timeout for reads from this stream.
+   * 
+   * Note: the behavior here can differ subtly depending on whether the
+   * underlying socket has an associated Channel. In particular, if there is no
+   * channel, then this call will affect the socket timeout for <em>all</em>
+   * readers of this socket. If there is a channel, then this call will affect
+   * the timeout only for <em>this</em> stream. As such, it is recommended to
+   * only create one {@link SocketInputWrapper} instance per socket.
+   * 
+   * @param timeoutMs
+   *          the new timeout, 0 for no timeout
+   * @throws SocketException
+   *           if the timeout cannot be set
+   */
+  public void setTimeout(long timeoutMs) throws SocketException {
+    if (hasChannel) {
+      ((SocketInputStream)in).setTimeout(timeoutMs);
+    } else {
+      socket.setSoTimeout((int)timeoutMs);
+    }
+  }
+
+  /**
+   * @return an underlying ReadableByteChannel implementation.
+   * @throws IllegalStateException if this socket does not have a channel
+   */
+  public ReadableByteChannel getReadableByteChannel() {
+    Preconditions.checkState(hasChannel,
+        "Socket %s does not have a channel",
+        this.socket);
+    return (SocketInputStream)in;
+  }
+}

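As a usage sketch (mirroring what RemoteBlockReader2 does further down in this change), the wrapper also exposes the underlying channel when one exists:

    SocketInputWrapper sin = NetUtils.getInputStream(sock);
    // Throws IllegalStateException if the socket has no associated channel.
    ReadableByteChannel ch = sin.getReadableByteChannel();
    DataInputStream in = new DataInputStream(sin);
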
+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java

@@ -59,7 +59,7 @@ public class ServiceAuthorizationManager {
   public static final Log AUDITLOG =
     LogFactory.getLog("SecurityLogger."+ServiceAuthorizationManager.class.getName());
 
-  private static final String AUTHZ_SUCCESSFULL_FOR = "Authorization successfull for ";
+  private static final String AUTHZ_SUCCESSFUL_FOR = "Authorization successful for ";
   private static final String AUTHZ_FAILED_FOR = "Authorization failed for ";
 
   
@@ -108,7 +108,7 @@ public class ServiceAuthorizationManager {
           " is not authorized for protocol " + protocol + 
           ", expected client Kerberos principal is " + clientPrincipal);
     }
-    AUDITLOG.info(AUTHZ_SUCCESSFULL_FOR + user + " for protocol="+protocol);
+    AUDITLOG.info(AUTHZ_SUCCESSFUL_FOR + user + " for protocol="+protocol);
   }
 
   public synchronized void refresh(Configuration conf,

+ 6 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/DelegationKey.java

@@ -41,6 +41,7 @@ public class DelegationKey implements Writable {
   private long expiryDate;
   @Nullable
   private byte[] keyBytes = null;
+  private static final int MAX_KEY_LEN = 1024 * 1024;
 
   /** Default constructore required for Writable */
   public DelegationKey() {
@@ -55,6 +56,10 @@ public class DelegationKey implements Writable {
     this.keyId = keyId;
     this.expiryDate = expiryDate;
     if (encodedKey != null) {
+      if (encodedKey.length > MAX_KEY_LEN) {
+        throw new RuntimeException("can't create " + encodedKey.length +
+            " byte long DelegationKey.");
+      }
       this.keyBytes = encodedKey;
     }
   }
@@ -102,7 +107,7 @@ public class DelegationKey implements Writable {
   public void readFields(DataInput in) throws IOException {
     keyId = WritableUtils.readVInt(in);
     expiryDate = WritableUtils.readVLong(in);
-    int len = WritableUtils.readVInt(in);
+    int len = WritableUtils.readVIntInRange(in, -1, MAX_KEY_LEN);
     if (len == -1) {
       keyBytes = null;
     } else {

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c

@@ -70,7 +70,7 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
 
   // set the name of the group for subsequent calls to getnetgrent
   // note that we want to end group lokup regardless whether setnetgrent
-  // was successfull or not (as long as it was called we need to call
+  // was successful or not (as long as it was called we need to call
   // endnetgrent)
   setnetgrentCalledFlag = 1;
   if(setnetgrent(cgroup) == 1) {

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -62,6 +62,15 @@
   <description>Is service-level authorization enabled?</description>
 </property>
 
+<property>
+  <name>hadoop.security.instrumentation.requires.admin</name>
+  <value>false</value>
+  <description>
+    Indicates if administrator ACLs are required to access
+    instrumentation servlets (JMX, METRICS, CONF, STACKS).
+  </description>
+</property>
+
 <property>
   <name>hadoop.security.authentication</name>
   <value>simple</value>

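A hedged sketch of enabling the new key programmatically (equivalent to setting it in a site configuration file); the constant names match those used in TestHttpServer below:

    Configuration conf = new Configuration();
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    conf.setBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
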
+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java

@@ -164,7 +164,7 @@ public class TestConfigurationDeprecation {
     conf.set("Y", "y");
     conf.set("Z", "z");
     // get old key
-    assertEquals("y", conf.get("X"));
+    assertEquals("z", conf.get("X"));
   }
 
   /**
@@ -305,7 +305,7 @@ public class TestConfigurationDeprecation {
     assertTrue("deprecated Key not found", dKFound);
     assertTrue("new Key not found", nKFound);
   }
-
+  
   @Test
   public void testUnsetWithDeprecatedKeys() {
     Configuration conf = new Configuration();

+ 50 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java

@@ -18,10 +18,15 @@
 
 package org.apache.hadoop.conf;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.ByteArrayOutputStream;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.junit.Test;
 
 import junit.framework.TestCase;
 
@@ -53,4 +58,49 @@ public class TestDeprecatedKeys extends TestCase {
     assertTrue(fileContents.contains("old.config.yet.to.be.deprecated"));
     assertTrue(fileContents.contains("new.conf.to.replace.deprecated.conf"));
   }
+  
+  @Test
+  public void testIteratorWithDeprecatedKeysMappedToMultipleNewKeys() {
+    Configuration conf = new Configuration();
+    Configuration.addDeprecation("dK", new String[]{"nK1", "nK2"});
+    conf.set("k", "v");
+    conf.set("dK", "V");
+    assertEquals("V", conf.get("dK"));
+    assertEquals("V", conf.get("nK1"));
+    assertEquals("V", conf.get("nK2"));
+    conf.set("nK1", "VV");
+    assertEquals("VV", conf.get("dK"));
+    assertEquals("VV", conf.get("nK1"));
+    assertEquals("VV", conf.get("nK2"));
+    conf.set("nK2", "VVV");
+    assertEquals("VVV", conf.get("dK"));
+    assertEquals("VVV", conf.get("nK2"));
+    assertEquals("VVV", conf.get("nK1"));
+    boolean kFound = false;
+    boolean dKFound = false;
+    boolean nK1Found = false;
+    boolean nK2Found = false;
+    for (Map.Entry<String, String> entry : conf) {
+      if (entry.getKey().equals("k")) {
+        assertEquals("v", entry.getValue());
+        kFound = true;
+      }
+      if (entry.getKey().equals("dK")) {
+        assertEquals("VVV", entry.getValue());
+        dKFound = true;
+      }
+      if (entry.getKey().equals("nK1")) {
+        assertEquals("VVV", entry.getValue());
+        nK1Found = true;
+      }
+      if (entry.getKey().equals("nK2")) {
+        assertEquals("VVV", entry.getValue());
+        nK2Found = true;
+      }
+    }
+    assertTrue("regular Key not found", kFound);
+    assertTrue("deprecated Key not found", dKFound);
+    assertTrue("new Key 1 not found", nK1Found);
+    assertTrue("new Key 2 not found", nK2Found);
+  }
 }

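A condensed sketch of the behavior the new test exercises: a deprecated key fanning out to several new keys, with the last write winning for all of the names (the key names are the test's own placeholders):

    Configuration.addDeprecation("dK", new String[]{"nK1", "nK2"});
    Configuration conf = new Configuration();
    conf.set("dK", "V");         // value visible through dK, nK1 and nK2
    conf.set("nK2", "VVV");      // later write wins for all three names
    assert "VVV".equals(conf.get("dK")) && "VVV".equals(conf.get("nK1"));
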
+ 12 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java

@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.http;
 
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
 
@@ -70,6 +71,12 @@ public class HttpServerFunctionalTest extends Assert {
     return createServer(TEST, conf);
   }
 
+  public static HttpServer createTestServer(Configuration conf, AccessControlList adminsAcl)
+      throws IOException {
+    prepareTestWebapp();
+    return createServer(TEST, conf, adminsAcl);
+  }
+
   /**
    * Create but do not start the test webapp server. The test webapp dir is
    * prepared/checked in advance.
@@ -132,6 +139,11 @@ public class HttpServerFunctionalTest extends Assert {
       throws IOException {
     return new HttpServer(webapp, "0.0.0.0", 0, true, conf);
   }
+
+  public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl)
+      throws IOException {
+    return new HttpServer(webapp, "0.0.0.0", 0, true, conf, adminsAcl);
+  }
   /**
    * Create an HttpServer instance for the given webapp
    * @param webapp the webapp to work with

+ 22 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -60,7 +60,6 @@ import org.apache.hadoop.security.authorize.AccessControlList;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mockito.Mock;
 import org.mockito.Mockito;
 import org.mortbay.util.ajax.JSON;
 
@@ -360,6 +359,8 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     Configuration conf = new Configuration();
     conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
         true);
+    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
+        true);
     conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
         DummyFilterInitializer.class.getName());
 
@@ -468,6 +469,26 @@ public class TestHttpServer extends HttpServerFunctionalTest {
 
   }
 
+  @Test
+  public void testRequiresAuthorizationAccess() throws Exception {
+    Configuration conf = new Configuration();
+    ServletContext context = Mockito.mock(ServletContext.class);
+    Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+
+    //requires admin access to instrumentation, FALSE by default
+    Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context, request, response));
+
+    //requires admin access to instrumentation, TRUE
+    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
+    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
+    AccessControlList acls = Mockito.mock(AccessControlList.class);
+    Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
+    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+    Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response));
+  }
+
   @Test public void testBindAddress() throws Exception {
     checkBindAddress("0.0.0.0", 0, false).stop();
     // hang onto this one for a bit more testing

+ 25 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableUtils.java

@@ -44,6 +44,26 @@ public class TestWritableUtils extends TestCase {
     assertEquals(vintlen, WritableUtils.getVIntSize(val));
     assertEquals(vintlen, WritableUtils.decodeVIntSize(buf.getData()[0]));
   }
+  
+  public static void testReadInRange(long val, int lower,
+      int upper, boolean expectSuccess) throws IOException {
+    DataOutputBuffer buf = new DataOutputBuffer();
+    DataInputBuffer inbuf = new DataInputBuffer();
+    WritableUtils.writeVLong(buf, val);
+    try {
+      inbuf.reset(buf.getData(), 0, buf.getLength());
+      long val2 = WritableUtils.readVIntInRange(inbuf, lower, upper);
+      if (!expectSuccess) {
+        fail("expected readVIntInRange to throw an exception");
+      }
+      assertEquals(val, val2);
+    } catch(IOException e) {
+      if (expectSuccess) {
+        LOG.error("unexpected exception:", e);
+        fail("readVIntInRange threw an unexpected exception");
+      }
+    }
+  }
 
   public static void testVInt() throws Exception {
     testValue(12, 1);
@@ -61,5 +81,10 @@ public class TestWritableUtils extends TestCase {
     testValue(-65536, 3);
     testValue(65536, 4);
     testValue(-65537, 4);
+    testReadInRange(123, 122, 123, true);
+    testReadInRange(123, 0, 100, false);
+    testReadInRange(0, 0, 100, true);
+    testReadInRange(-1, 0, 100, false);
+    testReadInRange(1099511627776L, 0, Integer.MAX_VALUE, false);
   }
 }

+ 87 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java

@@ -25,11 +25,14 @@ import java.net.ConnectException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.NetworkInterface;
+import java.net.ServerSocket;
 import java.net.Socket;
 import java.net.SocketException;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.UnknownHostException;
 import java.util.Enumeration;
+import java.util.concurrent.TimeUnit;
 
 import junit.framework.AssertionFailedError;
 
@@ -37,7 +40,11 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.NetUtilsTestResolver;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
+import org.junit.Assume;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -50,6 +57,13 @@ public class TestNetUtils {
   private static final int LOCAL_PORT = 8080;
   private static final String LOCAL_PORT_NAME = Integer.toString(LOCAL_PORT);
 
+  /**
+   * Some slop around expected times when making sure timeouts behave
+   * as expected. We assume that they will be accurate to within
+   * this threshold.
+   */
+  static final long TIME_FUDGE_MILLIS = 200;
+
   /**
    * Test that we can't accidentally connect back to the connecting socket due
    * to a quirk in the TCP spec.
@@ -81,6 +95,79 @@ public class TestNetUtils {
     }
   }
   
+  @Test
+  public void testSocketReadTimeoutWithChannel() throws Exception {
+    doSocketReadTimeoutTest(true);
+  }
+  
+  @Test
+  public void testSocketReadTimeoutWithoutChannel() throws Exception {
+    doSocketReadTimeoutTest(false);
+  }
+
+  
+  private void doSocketReadTimeoutTest(boolean withChannel)
+      throws IOException {
+    // Binding a ServerSocket is enough to accept connections.
+    // Rely on the backlog to accept for us.
+    ServerSocket ss = new ServerSocket(0);
+    
+    Socket s;
+    if (withChannel) {
+      s = NetUtils.getDefaultSocketFactory(new Configuration())
+          .createSocket();
+      Assume.assumeNotNull(s.getChannel());
+    } else {
+      s = new Socket();
+      assertNull(s.getChannel());
+    }
+    
+    SocketInputWrapper stm = null;
+    try {
+      NetUtils.connect(s, ss.getLocalSocketAddress(), 1000);
+
+      stm = NetUtils.getInputStream(s, 1000);
+      assertReadTimeout(stm, 1000);
+
+      // Change timeout, make sure it applies.
+      stm.setTimeout(1);
+      assertReadTimeout(stm, 1);
+      
+      // If there is a channel, then setting the socket timeout
+      // should not matter. If there is not a channel, it will
+      // take effect.
+      s.setSoTimeout(1000);
+      if (withChannel) {
+        assertReadTimeout(stm, 1);
+      } else {
+        assertReadTimeout(stm, 1000);        
+      }
+    } finally {
+      IOUtils.closeStream(stm);
+      IOUtils.closeSocket(s);
+      ss.close();
+    }
+  }
+  
+  private void assertReadTimeout(SocketInputWrapper stm, int timeoutMillis)
+      throws IOException {
+    long st = System.nanoTime();
+    try {
+      stm.read();
+      fail("Didn't time out");
+    } catch (SocketTimeoutException ste) {
+      assertTimeSince(st, timeoutMillis);
+    }
+  }
+
+  private void assertTimeSince(long startNanos, int expectedMillis) {
+    long durationNano = System.nanoTime() - startNanos;
+    long millis = TimeUnit.MILLISECONDS.convert(
+        durationNano, TimeUnit.NANOSECONDS);
+    assertTrue("Expected " + expectedMillis + "ms, but took " + millis,
+        Math.abs(millis - expectedMillis) < TIME_FUDGE_MILLIS);
+  }
+  
   /**
    * Test for {
    * @throws UnknownHostException @link NetUtils#getLocalInetAddress(String)

+ 60 - 45
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.net;
 
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.InterruptedIOException;
 import java.io.OutputStream;
 import java.net.SocketTimeoutException;
 import java.nio.channels.Pipe;
@@ -26,8 +27,13 @@ import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.MultithreadedTestUtil;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 /**
  * This tests timout out from SocketInputStream and
@@ -36,14 +42,17 @@ import junit.framework.TestCase;
  * Normal read and write using these streams are tested by pretty much
  * every DFS unit test.
  */
-public class TestSocketIOWithTimeout extends TestCase {
+public class TestSocketIOWithTimeout {
 
   static Log LOG = LogFactory.getLog(TestSocketIOWithTimeout.class);
   
   private static int TIMEOUT = 1*1000; 
   private static String TEST_STRING = "1234567890";
+
+  private MultithreadedTestUtil.TestContext ctx = new TestContext();
   
-  private void doIO(InputStream in, OutputStream out) throws IOException {
+  private void doIO(InputStream in, OutputStream out,
+      int expectedTimeout) throws IOException {
     /* Keep on writing or reading until we get SocketTimeoutException.
      * It expects this exception to occur within 100 millis of TIMEOUT.
      */
@@ -61,34 +70,15 @@ public class TestSocketIOWithTimeout extends TestCase {
         long diff = System.currentTimeMillis() - start;
         LOG.info("Got SocketTimeoutException as expected after " + 
                  diff + " millis : " + e.getMessage());
-        assertTrue(Math.abs(TIMEOUT - diff) <= 200);
+        assertTrue(Math.abs(expectedTimeout - diff) <=
+          TestNetUtils.TIME_FUDGE_MILLIS);
         break;
       }
     }
   }
   
-  /**
-   * Just reads one byte from the input stream.
-   */
-  static class ReadRunnable implements Runnable {
-    private InputStream in;
-
-    public ReadRunnable(InputStream in) {
-      this.in = in;
-    }
-    public void run() {
-      try {
-        in.read();
-      } catch (IOException e) {
-        LOG.info("Got expection while reading as expected : " + 
-                 e.getMessage());
-        return;
-      }
-      assertTrue(false);
-    }
-  }
-  
-  public void testSocketIOWithTimeout() throws IOException {
+  @Test
+  public void testSocketIOWithTimeout() throws Exception {
     
     // first open pipe:
     Pipe pipe = Pipe.open();
@@ -96,7 +86,7 @@ public class TestSocketIOWithTimeout extends TestCase {
     Pipe.SinkChannel sink = pipe.sink();
     
     try {
-      InputStream in = new SocketInputStream(source, TIMEOUT);
+      final InputStream in = new SocketInputStream(source, TIMEOUT);
       OutputStream out = new SocketOutputStream(sink, TIMEOUT);
       
       byte[] writeBytes = TEST_STRING.getBytes();
@@ -105,37 +95,62 @@ public class TestSocketIOWithTimeout extends TestCase {
       
       out.write(writeBytes);
       out.write(byteWithHighBit);
-      doIO(null, out);
+      doIO(null, out, TIMEOUT);
       
       in.read(readBytes);
       assertTrue(Arrays.equals(writeBytes, readBytes));
       assertEquals(byteWithHighBit & 0xff, in.read());
-      doIO(in, null);
+      doIO(in, null, TIMEOUT);
+      
+      // Change timeout on the read side.
+      ((SocketInputStream)in).setTimeout(TIMEOUT * 2);
+      doIO(in, null, TIMEOUT * 2);
+      
       
       /*
        * Verify that it handles interrupted threads properly.
-       * Use a large timeout and expect the thread to return quickly.
+       * Use a large timeout and expect the thread to return quickly
+       * upon interruption.
        */
-      in = new SocketInputStream(source, 0);
-      Thread thread = new Thread(new ReadRunnable(in));
-      thread.start();
-      
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException ignored) {}
-      
+      ((SocketInputStream)in).setTimeout(0);
+      TestingThread thread = new TestingThread(ctx) {
+        @Override
+        public void doWork() throws Exception {
+          try {
+            in.read();
+            fail("Did not fail with interrupt");
+          } catch (InterruptedIOException ste) {
+            LOG.info("Got exception while reading as expected : " + 
+                ste.getMessage());
+          }
+        }
+      };
+      ctx.addThread(thread);
+      ctx.startThreads();
+      // If the thread is interrupted before it calls read()
+      // then it throws ClosedByInterruptException due to
+      // some Java quirk. Waiting for it to call read()
+      // gets it into select(), so we get the expected
+      // InterruptedIOException.
+      Thread.sleep(1000);
       thread.interrupt();
+      ctx.stop();
+
+      //make sure the channels are still open
+      assertTrue(source.isOpen());
+      assertTrue(sink.isOpen());
       
+      // Nevertheless, the output stream is closed, because
+      // a partial write may have succeeded (see comment in
+      // SocketOutputStream#write(byte[]), int, int)
       try {
-        thread.join();
-      } catch (InterruptedException e) {
-        throw new IOException("Unexpected InterruptedException : " + e);
+        out.write(1);
+        fail("Did not throw");
+      } catch (IOException ioe) {
+        GenericTestUtils.assertExceptionContains(
+            "stream is closed", ioe);
       }
       
-      //make sure the channels are still open
-      assertTrue(source.isOpen());
-      assertTrue(sink.isOpen());
-
       out.close();
       assertFalse(sink.isOpen());
       

+ 0 - 18
hadoop-common-project/hadoop-common/src/test/resources/test-patch.properties

@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OK_RELEASEAUDIT_WARNINGS=1
-OK_FINDBUGS_WARNINGS=0
-OK_JAVADOC_WARNINGS=6

+ 0 - 21
hadoop-hdfs-project/dev-support/test-patch.properties

@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The number of acceptable warning for this module
-# Please update the root test-patch.properties if you update this file.
-
-OK_RELEASEAUDIT_WARNINGS=0
-OK_FINDBUGS_WARNINGS=0
-OK_JAVADOC_WARNINGS=8

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -417,6 +417,11 @@ Release 2.0.0 - UNRELEASED
     HDFS-3322. Use HdfsDataInputStream and HdfsDataOutputStream in Hdfs.
     (szetszwo)
 
+    HDFS-3339. Change INode to package private.  (John George via szetszwo)
+
+    HDFS-3303. Remove Writable implementation from RemoteEditLogManifest.
+    (Brandon Li via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -581,6 +586,23 @@ Release 2.0.0 - UNRELEASED
     HDFS-3326. Append enabled log message uses the wrong variable.
     (Matthew Jacobs via eli)
 
+    HDFS-3336. hdfs launcher script will be better off not special casing 
+    namenode command with regards to hadoop.security.logger (rvs via tucu)
+
+    HDFS-3330. If GetImageServlet throws an Error or RTE, response should not
+    have HTTP "OK" status. (todd)
+
+    HDFS-3351. NameNode#initializeGenericKeys should always set fs.defaultFS
+    regardless of whether HA or Federation is enabled. (atm)
+
+    HDFS-3359. DFSClient.close should close cached sockets. (todd)
+
+    HDFS-3350. In INode, add final to compareTo(..), equals(..) and hashCode(),
+    and remove synchronized from updatePermissionStatus(..).  (szetszwo)
+
+    HDFS-3357. DataXceiver reads from client socket with incorrect/no timeout
+    (todd)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

+ 1 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs

@@ -118,12 +118,7 @@ if $cygwin; then
 fi
 export CLASSPATH=$CLASSPATH
 
-#turn security logger on the namenode
-if [ $COMMAND = "namenode" ]; then
-  HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS}"
-else
-  HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
-fi
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
 
 # Check to see if we should start a secure datanode
 if [ "$starting_secure_dn" = "true" ]; then

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -560,6 +560,7 @@ public class DFSClient implements java.io.Closeable {
   void abort() {
     clientRunning = false;
     closeAllFilesBeingWritten(true);
+    socketCache.clear();
     closeConnectionToNamenode();
   }
 
@@ -597,6 +598,7 @@ public class DFSClient implements java.io.Closeable {
   public synchronized void close() throws IOException {
     if(clientRunning) {
       closeAllFilesBeingWritten(false);
+      socketCache.clear();
       clientRunning = false;
       leaserenewer.closeClient(this);
       // close connections to the namenode

+ 0 - 31
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -139,37 +139,6 @@ public class DFSUtil {
     return true;
   }
 
-  /**
-   * Utility class to facilitate junit test error simulation.
-   */
-  @InterfaceAudience.Private
-  public static class ErrorSimulator {
-    private static boolean[] simulation = null; // error simulation events
-    public static void initializeErrorSimulationEvent(int numberOfEvents) {
-      simulation = new boolean[numberOfEvents]; 
-      for (int i = 0; i < numberOfEvents; i++) {
-        simulation[i] = false;
-      }
-    }
-    
-    public static boolean getErrorSimulation(int index) {
-      if(simulation == null)
-        return false;
-      assert(index < simulation.length);
-      return simulation[index];
-    }
-    
-    public static void setErrorSimulation(int index) {
-      assert(index < simulation.length);
-      simulation[index] = true;
-    }
-    
-    public static void clearErrorSimulation(int index) {
-      assert(index < simulation.length);
-      simulation[index] = false;
-    }
-  }
-
   /**
    * Converts a byte array to a string using UTF8 encoding.
    */

+ 4 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java

@@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.util.DirectBufferPool;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.SocketInputStream;
+import org.apache.hadoop.net.SocketInputWrapper;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 
@@ -450,11 +450,8 @@ public class RemoteBlockReader2  implements BlockReader {
     //
     // Get bytes in block, set streams
     //
-    Preconditions.checkArgument(sock.getChannel() != null,
-        "Socket %s does not have an associated Channel.",
-        sock);
-    SocketInputStream sin =
-      (SocketInputStream)NetUtils.getInputStream(sock);
+    SocketInputWrapper sin = NetUtils.getInputStream(sock);
+    ReadableByteChannel ch = sin.getReadableByteChannel();
     DataInputStream in = new DataInputStream(sin);
 
     BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
@@ -477,7 +474,7 @@ public class RemoteBlockReader2  implements BlockReader {
     }
 
     return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
-        sin, checksum, verifyChecksum, startOffset, firstChunkOffset, len, sock);
+        ch, checksum, verifyChecksum, startOffset, firstChunkOffset, len, sock);
   }
 
   static void checkSuccess(

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -94,7 +94,7 @@ import org.apache.hadoop.util.ToolRunner;
  * </pre>
  * 
  * <p>DESCRIPTION
- * <p>The threshold parameter is a fraction in the range of (0%, 100%) with a 
+ * <p>The threshold parameter is a fraction in the range of (1%, 100%) with a 
  * default value of 10%. The threshold sets a target for whether the cluster 
  * is balanced. A cluster is balanced if for each datanode, the utilization 
  * of the node (ratio of used space at the node to total capacity of the node) 
@@ -1503,14 +1503,14 @@ public class Balancer {
               i++;
               try {
                 threshold = Double.parseDouble(args[i]);
-                if (threshold < 0 || threshold > 100) {
-                  throw new NumberFormatException(
+                if (threshold < 1 || threshold > 100) {
+                  throw new IllegalArgumentException(
                       "Number out of range: threshold = " + threshold);
                 }
                 LOG.info( "Using a threshold of " + threshold );
-              } catch(NumberFormatException e) {
+              } catch(IllegalArgumentException e) {
                 System.err.println(
-                    "Expecting a number in the range of [0.0, 100.0]: "
+                    "Expecting a number in the range of [1.0, 100.0]: "
                     + args[i]);
                 throw e;
               }

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -55,7 +55,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
-import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -2380,7 +2379,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       // necessary. In that case, put block on a possibly-will-
       // be-replicated list.
       //
-      INode fileINode = blocksMap.getINode(block);
+      INodeFile fileINode = blocksMap.getINode(block);
       if (fileINode != null) {
         namesystem.decrementSafeBlockCount(block);
         updateNeededReplications(block, -1, 0);
@@ -2612,7 +2611,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       NumberReplicas num) {
     int curReplicas = num.liveReplicas();
     int curExpectedReplicas = getReplication(block);
-    INode fileINode = blocksMap.getINode(block);
+    INodeFile fileINode = blocksMap.getINode(block);
     Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
     StringBuilder nodeList = new StringBuilder();
     while (nodeIter.hasNext()) {
@@ -2663,7 +2662,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     final Iterator<? extends Block> it = srcNode.getBlockIterator();
     while(it.hasNext()) {
       final Block block = it.next();
-      INode fileINode = blocksMap.getINode(block);
+      INodeFile fileINode = blocksMap.getINode(block);
 
       if (fileINode != null) {
         NumberReplicas num = countNodes(block);

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java

@@ -235,6 +235,9 @@ class BPServiceActor implements Runnable {
   }
 
   void reportBadBlocks(ExtendedBlock block) {
+    if (bpRegistration == null) {
+      return;
+    }
     DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
     LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) }; 
     

+ 18 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.SocketInputWrapper;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
@@ -83,13 +84,24 @@ class DataXceiver extends Receiver implements Runnable {
   private final DataXceiverServer dataXceiverServer;
 
   private long opStartTime; //the start time of receiving an Op
+  private final SocketInputWrapper socketInputWrapper;
   
-  public DataXceiver(Socket s, DataNode datanode, 
+  public static DataXceiver create(Socket s, DataNode dn,
+      DataXceiverServer dataXceiverServer) throws IOException {
+    
+    SocketInputWrapper iw = NetUtils.getInputStream(s);
+    return new DataXceiver(s, iw, dn, dataXceiverServer);
+  }
+  
+  private DataXceiver(Socket s, 
+      SocketInputWrapper socketInput,
+      DataNode datanode, 
       DataXceiverServer dataXceiverServer) throws IOException {
     super(new DataInputStream(new BufferedInputStream(
-        NetUtils.getInputStream(s), HdfsConstants.SMALL_BUFFER_SIZE)));
+        socketInput, HdfsConstants.SMALL_BUFFER_SIZE)));
 
     this.s = s;
+    this.socketInputWrapper = socketInput;
     this.isLocal = s.getInetAddress().equals(s.getLocalAddress());
     this.datanode = datanode;
     this.dnConf = datanode.getDnConf();
@@ -128,8 +140,6 @@ class DataXceiver extends Receiver implements Runnable {
     Op op = null;
     dataXceiverServer.childSockets.add(s);
     try {
-      int stdTimeout = s.getSoTimeout();
-
       // We process requests in a loop, and stay around for a short timeout.
       // This optimistic behaviour allows the other end to reuse connections.
       // Setting keepalive timeout to 0 disable this behavior.
@@ -139,7 +149,9 @@ class DataXceiver extends Receiver implements Runnable {
         try {
           if (opsProcessed != 0) {
             assert dnConf.socketKeepaliveTimeout > 0;
-            s.setSoTimeout(dnConf.socketKeepaliveTimeout);
+            socketInputWrapper.setTimeout(dnConf.socketKeepaliveTimeout);
+          } else {
+            socketInputWrapper.setTimeout(dnConf.socketTimeout);
           }
           op = readOp();
         } catch (InterruptedIOException ignored) {
@@ -160,7 +172,7 @@ class DataXceiver extends Receiver implements Runnable {
 
         // restore normal timeout
         if (opsProcessed != 0) {
-          s.setSoTimeout(stdTimeout);
+          s.setSoTimeout(dnConf.socketTimeout);
         }
 
         opStartTime = now();

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java

@@ -135,6 +135,7 @@ class DataXceiverServer implements Runnable {
       try {
         s = ss.accept();
         s.setTcpNoDelay(true);
+        // Timeouts are set within DataXceiver.run()
 
         // Make sure the xceiver count is not exceeded
         int curXceiverCount = datanode.getXceiverCount();
@@ -144,7 +145,8 @@ class DataXceiverServer implements Runnable {
               + maxXceiverCount);
         }
 
-        new Daemon(datanode.threadGroup, new DataXceiver(s, datanode, this))
+        new Daemon(datanode.threadGroup,
+            DataXceiver.create(s, datanode, this))
             .start();
       } catch (SocketTimeoutException ignored) {
         // wake up to see if should continue to run

+ 25 - 24
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeState.java → hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointFaultInjector.java

@@ -15,31 +15,32 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+package org.apache.hadoop.hdfs.server.namenode;
 
-package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
+import java.io.File;
+import java.io.IOException;
 
-import org.apache.hadoop.yarn.api.records.NodeState;
-
-//TODO yarn.api.records.NodeState is a clone of RMNodeState made for MR-3353. 
-// In a subsequent patch RMNodeState should be replaced with NodeState
-public enum RMNodeState {
-  NEW, RUNNING, UNHEALTHY, DECOMMISSIONED, LOST, REBOOTED;
+/**
+ * Utility class to facilitate some fault injection tests for the checkpointing
+ * process.
+ */
+class CheckpointFaultInjector {
+  static CheckpointFaultInjector instance = new CheckpointFaultInjector();
+  
+  static CheckpointFaultInjector getInstance() {
+    return instance;
+  }
   
-  public static NodeState toNodeState(RMNodeState state) {
-    switch(state) {
-    case NEW:
-      return NodeState.NEW;
-    case RUNNING:
-      return NodeState.RUNNING;
-    case UNHEALTHY:
-      return NodeState.UNHEALTHY;
-    case DECOMMISSIONED:
-      return NodeState.DECOMMISSIONED;
-    case LOST:
-      return NodeState.LOST;
-    case REBOOTED:
-      return NodeState.REBOOTED;
-    }
-    return null;
+  public void beforeGetImageSetsHeaders() throws IOException {}
+  public void afterSecondaryCallsRollEditLog() throws IOException {}
+  public void afterSecondaryUploadsNewImage() throws IOException {}
+  public void aboutToSendFile(File localfile) throws IOException {}
+
+  public boolean shouldSendShortFile(File localfile) {
+    return false;
   }
-};
+  public boolean shouldCorruptAByte(File localfile) {
+    return false;
+  }
+  
+}

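A hedged sketch of how a checkpoint test in the same package might inject a fault by swapping the package-private instance field shown above (the trigger condition is hypothetical):

    CheckpointFaultInjector.instance = new CheckpointFaultInjector() {
      @Override
      public boolean shouldCorruptAByte(File localfile) {
        // Hypothetical trigger: corrupt only image files.
        return localfile.getName().contains("fsimage");
      }
    };
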
+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java

@@ -119,6 +119,7 @@ public class GetImageServlet extends HttpServlet {
             if (imageFile == null) {
               throw new IOException(errorMessage);
             }
+            CheckpointFaultInjector.getInstance().beforeGetImageSetsHeaders();
             setFileNameHeaders(response, imageFile);
             setVerificationHeaders(response, imageFile);
             // send fsImage
@@ -189,8 +190,8 @@ public class GetImageServlet extends HttpServlet {
         }       
       });
       
-    } catch (Exception ie) {
-      String errMsg = "GetImage failed. " + StringUtils.stringifyException(ie);
+    } catch (Throwable t) {
+      String errMsg = "GetImage failed. " + StringUtils.stringifyException(t);
       response.sendError(HttpServletResponse.SC_GONE, errMsg);
       throw new IOException(errMsg);
     } finally {

+ 22 - 37
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -29,12 +30,15 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.primitives.SignedBytes;
+
 /**
  * We keep an in-memory representation of the file/block hierarchy.
  * This is a base INode class containing common fields for file and 
  * directory inodes.
  */
-public abstract class INode implements Comparable<byte[]>, FSInodeInfo {
+@InterfaceAudience.Private
+abstract class INode implements Comparable<byte[]>, FSInodeInfo {
   /*
    *  The inode name is in java UTF8 encoding; 
    *  The name in HdfsFileStatus should keep the same encoding as this.
@@ -141,8 +145,7 @@ public abstract class INode implements Comparable<byte[]>, FSInodeInfo {
   protected PermissionStatus getPermissionStatus() {
     return new PermissionStatus(getUserName(),getGroupName(),getFsPermission());
   }
-  private synchronized void updatePermissionStatus(
-      PermissionStatusFormat f, long n) {
+  private void updatePermissionStatus(PermissionStatusFormat f, long n) {
     permission = f.combine(n, permission);
   }
   /** Get user name */
@@ -398,48 +401,30 @@ public abstract class INode implements Comparable<byte[]>, FSInodeInfo {
     }
   }
 
-  //
-  // Comparable interface
-  //
-  public int compareTo(byte[] o) {
-    return compareBytes(name, o);
+  private static final byte[] EMPTY_BYTES = {};
+
+  @Override
+  public final int compareTo(byte[] bytes) {
+    final byte[] left = name == null? EMPTY_BYTES: name;
+    final byte[] right = bytes == null? EMPTY_BYTES: bytes;
+    return SignedBytes.lexicographicalComparator().compare(left, right);
   }
 
-  public boolean equals(Object o) {
-    if (!(o instanceof INode)) {
+  @Override
+  public final boolean equals(Object that) {
+    if (this == that) {
+      return true;
+    }
+    if (that == null || !(that instanceof INode)) {
       return false;
     }
-    return Arrays.equals(this.name, ((INode)o).name);
+    return Arrays.equals(this.name, ((INode)that).name);
   }
 
-  public int hashCode() {
+  @Override
+  public final int hashCode() {
     return Arrays.hashCode(this.name);
   }
-
-  //
-  // static methods
-  //
-  /**
-   * Compare two byte arrays.
-   * 
-   * @return a negative integer, zero, or a positive integer 
-   * as defined by {@link #compareTo(byte[])}.
-   */
-  static int compareBytes(byte[] a1, byte[] a2) {
-    if (a1==a2)
-        return 0;
-    int len1 = (a1==null ? 0 : a1.length);
-    int len2 = (a2==null ? 0 : a2.length);
-    int n = Math.min(len1, len2);
-    byte b1, b2;
-    for (int i=0; i<n; i++) {
-      b1 = a1[i];
-      b2 = a2[i];
-      if (b1 != b2)
-        return b1 - b2;
-    }
-    return len1 - len2;
-  }
   
   /**
    * Create an INode; the inode's name is not set yet

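For context, a one-line sketch of the Guava comparator now used by compareTo(..): it orders byte arrays lexicographically by signed byte value, so the comparison below is negative:

    int c = SignedBytes.lexicographicalComparator().compare(
        new byte[] {1, 2}, new byte[] {1, 3});   // c < 0
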
+ 4 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -173,9 +173,9 @@ class INodeDirectory extends INode {
    */
   int getExistingPathINodes(byte[][] components, INode[] existing, 
       boolean resolveLink) throws UnresolvedLinkException {
-    assert compareBytes(this.name, components[0]) == 0 :
-      "Incorrect name " + getLocalName() + " expected " + 
-      DFSUtil.bytes2String(components[0]);
+    assert this.compareTo(components[0]) == 0 :
+        "Incorrect name " + getLocalName() + " expected "
+        + (components[0] == null? null: DFSUtil.bytes2String(components[0]));
 
     INode curNode = this;
     int count = 0;
@@ -317,8 +317,7 @@ class INodeDirectory extends INode {
                               INode newNode,
                               INodeDirectory parent,
                               boolean propagateModTime
-                              ) throws FileNotFoundException, 
-                                       UnresolvedLinkException {
+                              ) throws FileNotFoundException {
     // insert into the parent children list
     newNode.name = localname;
     if(parent.addChild(newNode, propagateModTime) == null)

+ 31 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -25,6 +25,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Iterator;
@@ -674,10 +675,14 @@ public class NameNode {
     initializeGenericKeys(conf, nsId, namenodeId);
     checkAllowFormat(conf);
     
-    Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+    Collection<URI> nameDirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+    List<URI> sharedDirs = FSNamesystem.getSharedEditsDirs(conf);
+    List<URI> dirsToPrompt = new ArrayList<URI>();
+    dirsToPrompt.addAll(nameDirsToFormat);
+    dirsToPrompt.addAll(sharedDirs);
     List<URI> editDirsToFormat = 
                  FSNamesystem.getNamespaceEditsDirs(conf);
-    if (!confirmFormat(dirsToFormat, force, isInteractive)) {
+    if (!confirmFormat(dirsToPrompt, force, isInteractive)) {
       return true; // aborted
     }
 
@@ -689,7 +694,7 @@ public class NameNode {
     }
     System.out.println("Formatting using clusterid: " + clusterId);
     
-    FSImage fsImage = new FSImage(conf, dirsToFormat, editDirsToFormat);
+    FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat);
     FSNamesystem fsn = new FSNamesystem(conf, fsImage);
     fsImage.format(fsn, clusterId);
     return false;
@@ -711,7 +716,18 @@ public class NameNode {
       boolean force, boolean interactive)
       throws IOException {
     for(Iterator<URI> it = dirsToFormat.iterator(); it.hasNext();) {
-      File curDir = new File(it.next().getPath());
+      URI dirUri = it.next();
+      if (!dirUri.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
+        System.err.println("Skipping format for directory \"" + dirUri
+            + "\". Can only format local directories with scheme \""
+            + NNStorage.LOCAL_URI_SCHEME + "\".");
+        continue;
+      }
+      // To validate only file based schemes are formatted
+      assert dirUri.getScheme().equals(NNStorage.LOCAL_URI_SCHEME) :
+        "formatting is not supported for " + dirUri;
+
+      File curDir = new File(dirUri.getPath());
       // Its alright for a dir not to exist, or to exist (properly accessible)
       // and be completely empty.
       if (!curDir.exists() ||
@@ -1114,20 +1130,18 @@ public class NameNode {
    */
   public static void initializeGenericKeys(Configuration conf,
       String nameserviceId, String namenodeId) {
-    if ((nameserviceId == null || nameserviceId.isEmpty()) && 
-        (namenodeId == null || namenodeId.isEmpty())) {
-      return;
-    }
-    
-    if (nameserviceId != null) {
-      conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
-    }
-    if (namenodeId != null) {
-      conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
+    if ((nameserviceId != null && !nameserviceId.isEmpty()) || 
+        (namenodeId != null && !namenodeId.isEmpty())) {
+      if (nameserviceId != null) {
+        conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
+      }
+      if (namenodeId != null) {
+        conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
+      }
+      
+      DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
+          NAMESERVICE_SPECIFIC_KEYS);
     }
-    
-    DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
-        NAMESERVICE_SPECIFIC_KEYS);
     if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));

+ 2 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -47,7 +47,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -476,10 +475,7 @@ public class SecondaryNameNode implements Runnable {
     }
 
     // error simulation code for junit test
-    if (ErrorSimulator.getErrorSimulation(0)) {
-      throw new IOException("Simulating error0 " +
-                            "after creating edits.new");
-    }
+    CheckpointFaultInjector.getInstance().afterSecondaryCallsRollEditLog();
 
     RemoteEditLogManifest manifest =
       namenode.getEditLogManifest(sig.mostRecentCheckpointTxId + 1);
@@ -497,10 +493,7 @@ public class SecondaryNameNode implements Runnable {
         dstStorage, txid);
 
     // error simulation code for junit test
-    if (ErrorSimulator.getErrorSimulation(1)) {
-      throw new IOException("Simulating error1 " +
-                            "after uploading new image to NameNode");
-    }
+    CheckpointFaultInjector.getInstance().afterSecondaryUploadsNewImage();
 
     LOG.warn("Checkpoint done. New Image Size: " 
              + dstStorage.getFsImageName(txid).length());

+ 7 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
-import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -153,15 +152,12 @@ public class TransferFsImage {
     FileInputStream infile = null;
     try {
       infile = new FileInputStream(localfile);
-      if (ErrorSimulator.getErrorSimulation(2)
-          && localfile.getAbsolutePath().contains("secondary")) {
-        // throw exception only when the secondary sends its image
-        throw new IOException("If this exception is not caught by the " +
-            "name-node fs image will be truncated.");
-      }
+      CheckpointFaultInjector.getInstance()
+          .aboutToSendFile(localfile);
       
-      if (ErrorSimulator.getErrorSimulation(3)
-          && localfile.getAbsolutePath().contains("fsimage")) {
+
+      if (CheckpointFaultInjector.getInstance().
+            shouldSendShortFile(localfile)) {
           // Test sending image shorter than localfile
           long len = localfile.length();
           buf = new byte[(int)Math.min(len/2, HdfsConstants.IO_FILE_BUFFER_SIZE)];
@@ -175,8 +171,8 @@ public class TransferFsImage {
         if (num <= 0) {
           break;
         }
-
-        if (ErrorSimulator.getErrorSimulation(4)) {
+        if (CheckpointFaultInjector.getInstance()
+              .shouldCorruptAByte(localfile)) {
           // Simulate a corrupted byte on the wire
           LOG.warn("SIMULATING A CORRUPT BYTE IN IMAGE TRANSFER!");
           buf[0]++;
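
The SecondaryNameNode and TransferFsImage hunks above replace the numbered ErrorSimulator flags with calls on a fault-injector singleton whose hooks do nothing in production and are swapped for a Mockito mock in TestCheckpoint further down. A minimal sketch of the pattern, assuming nothing about the real CheckpointFaultInjector beyond the hook names visible in these hunks:

import java.io.File;
import java.io.IOException;

// Sketch only: a no-op injector that production code calls unconditionally;
// tests replace (or mock) the instance to force failures at named points.
class CheckpointFaultInjectorSketch {
  static CheckpointFaultInjectorSketch instance = new CheckpointFaultInjectorSketch();

  static CheckpointFaultInjectorSketch getInstance() {
    return instance;
  }

  void afterSecondaryCallsRollEditLog() throws IOException {}
  void afterSecondaryUploadsNewImage() throws IOException {}
  void aboutToSendFile(File localfile) throws IOException {}
  boolean shouldSendShortFile(File localfile) { return false; }
  boolean shouldCorruptAByte(File localfile) { return false; }
}

A test can then stub a single hook, for example Mockito.doThrow(new IOException("...")).when(injector).afterSecondaryCallsRollEditLog(), which is exactly what TestCheckpoint does below.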

+ 1 - 28
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java

@@ -17,22 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.hadoop.io.Writable;
-
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
 
 /**
  * An enumeration of logs available on a remote NameNode.
  */
-public class RemoteEditLogManifest implements Writable {
+public class RemoteEditLogManifest {
 
   private List<RemoteEditLog> logs;
   
@@ -75,25 +69,4 @@ public class RemoteEditLogManifest implements Writable {
   public String toString() {
     return "[" + Joiner.on(", ").join(logs) + "]";
   }
-  
-  
-  @Override
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(logs.size());
-    for (RemoteEditLog log : logs) {
-      log.write(out);
-    }
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    int numLogs = in.readInt();
-    logs = Lists.newArrayList();
-    for (int i = 0; i < numLogs; i++) {
-      RemoteEditLog log = new RemoteEditLog();
-      log.readFields(in);
-      logs.add(log);
-    }
-    checkState();
-  }
 }

+ 19 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java

@@ -319,6 +319,25 @@ public class TestDFSUtil {
     }
   }
 
+  /**
+   * Ensure that fs.defaultFS is set in the configuration even if neither HA nor
+   * Federation is enabled.
+   * 
+   * Regression test for HDFS-3351.
+   */
+  @Test
+  public void testConfModificationNoFederationOrHa() {
+    final HdfsConfiguration conf = new HdfsConfiguration();
+    String nsId = null;
+    String nnId = null;
+    
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1234");
+
+    assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY)));
+    NameNode.initializeGenericKeys(conf, nsId, nnId);
+    assertEquals("hdfs://localhost:1234", conf.get(FS_DEFAULT_NAME_KEY));
+  }
+
   /**
    * Regression test for HDFS-2934.
    */

+ 159 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java

@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
+import static org.junit.Assert.*;
+
+import java.io.PrintWriter;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestDataTransferKeepalive {
+  Configuration conf = new HdfsConfiguration();
+  private MiniDFSCluster cluster;
+  private FileSystem fs;
+  private InetSocketAddress dnAddr;
+  private DataNode dn;
+  private DFSClient dfsClient;
+  private static Path TEST_FILE = new Path("/test");
+  
+  private static final int KEEPALIVE_TIMEOUT = 1000;
+  private static final int WRITE_TIMEOUT = 3000;
+  
+  @Before
+  public void setup() throws Exception {
+    conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
+        KEEPALIVE_TIMEOUT);
+    
+    cluster = new MiniDFSCluster.Builder(conf)
+      .numDataNodes(1).build();
+    fs = cluster.getFileSystem();
+    dfsClient = ((DistributedFileSystem)fs).dfs;
+
+    String poolId = cluster.getNamesystem().getBlockPoolId();
+    dn = cluster.getDataNodes().get(0);
+    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
+        dn, poolId);
+    dnAddr = NetUtils.createSocketAddr(dnReg.getXferAddr());
+  }
+  
+  @After
+  public void teardown() {
+    cluster.shutdown();
+  }
+  
+  /**
+   * Regression test for HDFS-3357. Check that the datanode is respecting
+   * its configured keepalive timeout.
+   */
+  @Test(timeout=30000)
+  public void testKeepaliveTimeouts() throws Exception {
+    DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
+
+    // Clients that write aren't currently re-used.
+    assertEquals(0, dfsClient.socketCache.size());
+    assertXceiverCount(0);
+
+    // Reads the file, so we should get a
+    // cached socket, and should have an xceiver on the other side.
+    DFSTestUtil.readFile(fs, TEST_FILE);
+    assertEquals(1, dfsClient.socketCache.size());
+    assertXceiverCount(1);
+
+    // Sleep for a bit longer than the keepalive timeout
+    // and make sure the xceiver died.
+    Thread.sleep(KEEPALIVE_TIMEOUT * 2);
+    assertXceiverCount(0);
+    
+    // The socket is still in the cache, because we don't
+    // notice that it's closed until we try to read
+    // from it again.
+    assertEquals(1, dfsClient.socketCache.size());
+    
+    // Take it out of the cache - reading should
+    // give an EOF.
+    Socket s = dfsClient.socketCache.get(dnAddr);
+    assertNotNull(s);
+    assertEquals(-1, NetUtils.getInputStream(s).read());
+  }
+
+  /**
+   * Test for the case where the client begins to read a long block, but doesn't
+   * read bytes off the stream quickly. The datanode should time out sending the
+   * chunks and the transceiver should die, even if it has a long keepalive.
+   */
+  @Test(timeout=30000)
+  public void testSlowReader() throws Exception {
+    // Restart the DN with a shorter write timeout.
+    DataNodeProperties props = cluster.stopDataNode(0);
+    props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
+        WRITE_TIMEOUT);
+    props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
+        120000);
+    assertTrue(cluster.restartDataNode(props, true));
+    // Wait for heartbeats to avoid a startup race where we
+    // try to write the block while the DN is still starting.
+    cluster.triggerHeartbeats();
+    
+    dn = cluster.getDataNodes().get(0);
+    
+    DFSTestUtil.createFile(fs, TEST_FILE, 1024*1024*8L, (short)1, 0L);
+    FSDataInputStream stm = fs.open(TEST_FILE);
+    try {
+      stm.read();
+      assertXceiverCount(1);
+
+      Thread.sleep(WRITE_TIMEOUT + 1000);
+      // DN should time out in sendChunks, and this should force
+      // the xceiver to exit.
+      assertXceiverCount(0);
+    } finally {
+      IOUtils.closeStream(stm);
+    }
+  }
+
+  private void assertXceiverCount(int expected) {
+    // Subtract 1, since the DataXceiverServer
+    // counts as one
+    int count = dn.getXceiverCount() - 1;
+    if (count != expected) {
+      ReflectionUtils.printThreadInfo(
+          new PrintWriter(System.err),
+          "Thread dumps");
+      fail("Expected " + expected + " xceivers, found " +
+          count);
+    }
+  }
+}

+ 14 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -85,6 +85,7 @@ public class TestDistributedFileSystem {
   /**
    * Tests DFSClient.close throws no ConcurrentModificationException if 
    * multiple files are open.
+   * Also tests that any cached sockets are closed. (HDFS-3359)
    */
   @Test
   public void testDFSClose() throws Exception {
@@ -94,11 +95,23 @@ public class TestDistributedFileSystem {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       FileSystem fileSys = cluster.getFileSystem();
       
-      // create two files
+      // create two files, leaving them open
       fileSys.create(new Path("/test/dfsclose/file-0"));
       fileSys.create(new Path("/test/dfsclose/file-1"));
+      
+      // create another file, close it, and read it, so
+      // the client gets a socket in its SocketCache
+      Path p = new Path("/non-empty-file");
+      DFSTestUtil.createFile(fileSys, p, 1L, (short)1, 0L);
+      DFSTestUtil.readFile(fileSys, p);
+      
+      DFSClient client = ((DistributedFileSystem)fileSys).dfs;
+      SocketCache cache = client.socketCache;
+      assertEquals(1, cache.size());
 
       fileSys.close();
+      
+      assertEquals(0, cache.size());
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }

+ 31 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -26,8 +26,6 @@ import java.util.List;
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -46,11 +44,14 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 /**
  * This class tests if a balancer schedules tasks correctly.
  */
-public class TestBalancer extends TestCase {
+public class TestBalancer {
   private static final Log LOG = LogFactory.getLog(
   "org.apache.hadoop.hdfs.TestBalancer");
   
@@ -365,8 +366,33 @@ public class TestBalancer extends TestCase {
     oneNodeTest(conf);
   }
   
+  /**
+   * Test the parse method in Balancer#Cli with threshold values outside the
+   * valid boundaries.
+   */
+  @Test
+  public void testBalancerCliParseWithThresholdOutOfBoundaries() {
+    String parameters[] = new String[] { "-threshold", "0" };
+    String reason = "IllegalArgumentException is expected when threshold value"
+        + " is out of boundary.";
+    try {
+      Balancer.Cli.parse(parameters);
+      fail(reason);
+    } catch (IllegalArgumentException e) {
+      assertEquals("Number out of range: threshold = 0.0", e.getMessage());
+    }
+    parameters = new String[] { "-threshold", "101" };
+    try {
+      Balancer.Cli.parse(parameters);
+      fail(reason);
+    } catch (IllegalArgumentException e) {
+      assertEquals("Number out of range: threshold = 101.0", e.getMessage());
+    }
+  }
+  
   /** Test a cluster with even distribution, 
    * then a new empty node is added to the cluster*/
+  @Test
   public void testBalancer0() throws Exception {
     Configuration conf = new HdfsConfiguration();
     initConf(conf);
@@ -375,6 +401,7 @@ public class TestBalancer extends TestCase {
   }
 
   /** Test unevenly distributed cluster */
+  @Test
   public void testBalancer1() throws Exception {
     Configuration conf = new HdfsConfiguration();
     initConf(conf);
@@ -384,6 +411,7 @@ public class TestBalancer extends TestCase {
         new String[] {RACK0, RACK1});
   }
   
+  @Test
   public void testBalancer2() throws Exception {
     Configuration conf = new HdfsConfiguration();
     initConf(conf);
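
The new testBalancerCliParseWithThresholdOutOfBoundaries above expects Balancer.Cli.parse to reject 0 and 101 with a "Number out of range" message. A standalone sketch of that kind of bounds check; the method name and the assumed valid range of [1, 100] are illustrative, not the actual Balancer.Cli internals:

// Sketch only: the range check the test above exercises.
public class ThresholdSketch {
  static double checkThreshold(String value) {
    double threshold = Double.parseDouble(value);
    if (threshold < 1 || threshold > 100) {
      throw new IllegalArgumentException(
          "Number out of range: threshold = " + threshold);
    }
    return threshold;
  }

  public static void main(String[] args) {
    System.out.println(checkThreshold("10"));   // accepted
    try {
      checkThreshold("0");                      // rejected, as in the test
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());       // Number out of range: threshold = 0.0
    }
  }
}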

+ 36 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java

@@ -27,13 +27,19 @@ import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.TestGenericJournalConf.DummyJournalManager;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -144,4 +150,34 @@ public class TestAllowFormat {
     NameNode.format(config);
     LOG.info("Done verifying format will succeed with allowformat true");
   }
+
+  /**
+   * Test that format is skipped for directories configured with a non-file scheme
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    String logicalName = "mycluster";
+
+    // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify that the NameNode
+    // is configured for HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
+    // considered.
+    String localhost = "127.0.0.1";
+    InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
+    InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
+    HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
+
+    conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
+    conf.set(DFSUtil.addKeySuffixes(
+        DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
+        DummyJournalManager.class.getName());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
+        + localhost + ":2181/ledgers");
+    conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
+
+    // NameNode.format asserts internally that only local (file) directories are formatted
+    NameNode.format(conf);
+  }
 }

+ 73 - 21
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java

@@ -42,7 +42,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -63,6 +62,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
+import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -93,11 +93,15 @@ public class TestCheckpoint extends TestCase {
   static final int fileSize = 8192;
   static final int numDatanodes = 3;
   short replication = 3;
+
+  private CheckpointFaultInjector faultInjector;
     
   @Override
   public void setUp() throws IOException {
     FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
-    ErrorSimulator.initializeErrorSimulationEvent(5);
+    
+    faultInjector = Mockito.mock(CheckpointFaultInjector.class);
+    CheckpointFaultInjector.instance = faultInjector;
   }
 
   static void writeFile(FileSystem fileSys, Path name, int repl)
@@ -222,14 +226,18 @@ public class TestCheckpoint extends TestCase {
       // Make the checkpoint fail after rolling the edits log.
       //
       SecondaryNameNode secondary = startSecondaryNameNode(conf);
-      ErrorSimulator.setErrorSimulation(0);
+      
+      Mockito.doThrow(new IOException(
+          "Injecting failure after rolling edit logs"))
+          .when(faultInjector).afterSecondaryCallsRollEditLog();
 
       try {
         secondary.doCheckpoint();  // this should fail
         assertTrue(false);
       } catch (IOException e) {
       }
-      ErrorSimulator.clearErrorSimulation(0);
+      
+      Mockito.reset(faultInjector);
       secondary.shutdown();
 
       //
@@ -282,14 +290,17 @@ public class TestCheckpoint extends TestCase {
       // Make the checkpoint fail after uploading the new fsimage.
       //
       SecondaryNameNode secondary = startSecondaryNameNode(conf);
-      ErrorSimulator.setErrorSimulation(1);
+      
+      Mockito.doThrow(new IOException(
+          "Injecting failure after uploading new image"))
+          .when(faultInjector).afterSecondaryUploadsNewImage();
 
       try {
         secondary.doCheckpoint();  // this should fail
         assertTrue(false);
       } catch (IOException e) {
       }
-      ErrorSimulator.clearErrorSimulation(1);
+      Mockito.reset(faultInjector);
       secondary.shutdown();
 
       //
@@ -341,14 +352,17 @@ public class TestCheckpoint extends TestCase {
       // Make the checkpoint fail after rolling the edit log.
       //
       SecondaryNameNode secondary = startSecondaryNameNode(conf);
-      ErrorSimulator.setErrorSimulation(0);
+
+      Mockito.doThrow(new IOException(
+          "Injecting failure after rolling edit logs"))
+          .when(faultInjector).afterSecondaryCallsRollEditLog();
 
       try {
         secondary.doCheckpoint();  // this should fail
         assertTrue(false);
       } catch (IOException e) {
       }
-      ErrorSimulator.clearErrorSimulation(0);
+      Mockito.reset(faultInjector);
       secondary.shutdown(); // secondary namenode crash!
 
       // start new instance of secondary and verify that 
@@ -395,6 +409,28 @@ public class TestCheckpoint extends TestCase {
    * Used to truncate primary fsimage file.
    */
   public void testSecondaryFailsToReturnImage() throws IOException {
+    Mockito.doThrow(new IOException("If this exception is not caught by the " +
+        "name-node, fs image will be truncated."))
+        .when(faultInjector).aboutToSendFile(filePathContaining("secondary"));
+
+    doSecondaryFailsToReturnImage();
+  }
+  
+  /**
+   * Similar to above test, but uses an unchecked Error, and causes it
+   * before even setting the length header. This used to cause image
+   * truncation. Regression test for HDFS-3330.
+   */
+  public void testSecondaryFailsWithErrorBeforeSettingHeaders()
+      throws IOException {
+    Mockito.doThrow(new Error("If this exception is not caught by the " +
+        "name-node, fs image will be truncated."))
+        .when(faultInjector).beforeGetImageSetsHeaders();
+
+    doSecondaryFailsToReturnImage();
+  }
+
+  private void doSecondaryFailsToReturnImage() throws IOException {
     LOG.info("Starting testSecondaryFailsToReturnImage");
     Configuration conf = new HdfsConfiguration();
     Path file1 = new Path("checkpointRI.dat");
@@ -414,7 +450,6 @@ public class TestCheckpoint extends TestCase {
       // Make the checkpoint
       //
       SecondaryNameNode secondary = startSecondaryNameNode(conf);
-      ErrorSimulator.setErrorSimulation(2);
 
       try {
         secondary.doCheckpoint();  // this should fail
@@ -424,7 +459,7 @@ public class TestCheckpoint extends TestCase {
         GenericTestUtils.assertExceptionContains(
             "If this exception is not caught", e);
       }
-      ErrorSimulator.clearErrorSimulation(2);
+      Mockito.reset(faultInjector);
 
       // Verify that image file sizes did not change.
       for (StorageDirectory sd2 :
@@ -442,6 +477,17 @@ public class TestCheckpoint extends TestCase {
     }
   }
 
+  private File filePathContaining(final String substring) {
+    return Mockito.<File>argThat(
+        new ArgumentMatcher<File>() {
+          @Override
+          public boolean matches(Object argument) {
+            String path = ((File)argument).getAbsolutePath();
+            return path.contains(substring);
+          }
+        });
+  }
+
   /**
    * Simulate 2NN failing to send the whole file (error type 3)
    * The length header in the HTTP transfer should prevent
@@ -450,7 +496,10 @@ public class TestCheckpoint extends TestCase {
   public void testNameNodeImageSendFailWrongSize()
       throws IOException {
     LOG.info("Starting testNameNodeImageSendFailWrongSize");
-    doSendFailTest(3, "is not of the advertised size");
+    
+    Mockito.doReturn(true).when(faultInjector)
+      .shouldSendShortFile(filePathContaining("fsimage"));
+    doSendFailTest("is not of the advertised size");
   }
 
   /**
@@ -461,19 +510,21 @@ public class TestCheckpoint extends TestCase {
   public void testNameNodeImageSendFailWrongDigest()
       throws IOException {
     LOG.info("Starting testNameNodeImageSendFailWrongDigest");
-    doSendFailTest(4, "does not match advertised digest");
+
+    Mockito.doReturn(true).when(faultInjector)
+        .shouldCorruptAByte(Mockito.any(File.class));
+    doSendFailTest("does not match advertised digest");
   }
 
   /**
    * Run a test where the 2NN runs into some kind of error when
    * sending the checkpoint back to the NN.
-   * @param errorType the ErrorSimulator type to trigger
    * @param exceptionSubstring an expected substring of the triggered exception
    */
-  private void doSendFailTest(int errorType, String exceptionSubstring)
+  private void doSendFailTest(String exceptionSubstring)
       throws IOException {
     Configuration conf = new HdfsConfiguration();
-    Path file1 = new Path("checkpoint-doSendFailTest-" + errorType + ".dat");
+    Path file1 = new Path("checkpoint-doSendFailTest-" + getName() + ".dat");
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                .numDataNodes(numDatanodes)
                                                .build();
@@ -485,7 +536,6 @@ public class TestCheckpoint extends TestCase {
       // Make the checkpoint fail after rolling the edit log.
       //
       SecondaryNameNode secondary = startSecondaryNameNode(conf);
-      ErrorSimulator.setErrorSimulation(errorType);
 
       try {
         secondary.doCheckpoint();  // this should fail
@@ -494,7 +544,7 @@ public class TestCheckpoint extends TestCase {
         // We only sent part of the image. Have to trigger this exception
         GenericTestUtils.assertExceptionContains(exceptionSubstring, e);
       }
-      ErrorSimulator.clearErrorSimulation(errorType);
+      Mockito.reset(faultInjector);
       secondary.shutdown(); // secondary namenode crash!
 
       // start new instance of secondary and verify that 
@@ -1017,7 +1067,9 @@ public class TestCheckpoint extends TestCase {
   
       secondary = startSecondaryNameNode(conf);
 
-      ErrorSimulator.setErrorSimulation(1);
+      Mockito.doThrow(new IOException(
+          "Injecting failure after rolling edit logs"))
+          .when(faultInjector).afterSecondaryCallsRollEditLog();
       
       // Fail to checkpoint once
       try {
@@ -1025,7 +1077,7 @@ public class TestCheckpoint extends TestCase {
         fail("Should have failed upload");
       } catch (IOException ioe) {
         LOG.info("Got expected failure", ioe);
-        assertTrue(ioe.toString().contains("Simulating error1"));
+        assertTrue(ioe.toString().contains("Injecting failure"));
       }
 
       // Fail to checkpoint again
@@ -1034,9 +1086,9 @@ public class TestCheckpoint extends TestCase {
         fail("Should have failed upload");
       } catch (IOException ioe) {
         LOG.info("Got expected failure", ioe);
-        assertTrue(ioe.toString().contains("Simulating error1"));
+        assertTrue(ioe.toString().contains("Injecting failure"));
       } finally {
-        ErrorSimulator.clearErrorSimulation(1);
+        Mockito.reset(faultInjector);
       }
 
       // Now with the cleared error simulation, it should succeed

+ 9 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java

@@ -167,6 +167,15 @@ public abstract class HATestUtil {
       Configuration conf, String logicalName, int nsIndex) {
     InetSocketAddress nnAddr1 = cluster.getNameNode(2 * nsIndex).getNameNodeAddress();
     InetSocketAddress nnAddr2 = cluster.getNameNode(2 * nsIndex + 1).getNameNodeAddress();
+    setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
+  }
+
+  /**
+   * Sets the required configurations for performing failover
+   */
+  public static void setFailoverConfigurations(Configuration conf,
+      String logicalName, InetSocketAddress nnAddr1,
+      InetSocketAddress nnAddr2) {
     String nameNodeId1 = "nn1";
     String nameNodeId2 = "nn2";
     String address1 = "hdfs://" + nnAddr1.getHostName() + ":" + nnAddr1.getPort();

+ 28 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -160,6 +160,12 @@ Release 2.0.0 - UNRELEASED
     MAPREDUCE-3883. Document yarn.nodemanager.delete.debug-delay-sec 
     configuration property (Eugene Koontz via tgraves)
 
+    MAPREDUCE-4219. make default container-executor.conf.dir be a path 
+    relative to the container-executor binary. (rvs via tucu)
+
+    MAPREDUCE-4205. retrofit all JVM shutdown hooks to use ShutdownHookManager 
+    (tucu)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -269,6 +275,15 @@ Release 2.0.0 - UNRELEASED
     MAPREDUCE-4193. broken doc link for yarn-default.xml in site.xml.
     (phunt via tomwhite)
 
+    MAPREDUCE-4202. TestYarnClientProtocolProvider is broken (Daryn Sharp via
+    bobby)
+
+    MAPREDUCE-3173. MRV2 UI doesn't work properly without internet (Devaraj K
+    via bobby)
+
+    MAPREDUCE-3958. RM: Remove RMNodeState and replace it with NodeState
+    (Bikas Saha via bobby)
+
 Release 0.23.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -304,6 +319,8 @@ Release 0.23.3 - UNRELEASED
     MAPREDUCE-4079. Allow MR AppMaster to limit ephemeral port range.
     (bobby via tgraves)
 
+    MAPREDUCE-4210. Expose listener address for WebApp (Daryn Sharp via bobby)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -430,6 +447,17 @@ Release 0.23.3 - UNRELEASED
     MAPREDUCE-4206. Sorting by Last Health-Update on the RM nodes page sorts
     does not work correctly (Jonathon Eagles via tgraves)
 
+    MAPREDUCE-4212. TestJobClientGetJob sometimes fails 
+    (Daryn Sharp via tgraves)
+
+    MAPREDUCE-4211. Error conditions (missing appid, appid not found) are 
+    masked in the RM app page (Jonathan Eagles via bobby)
+
+    MAPREDUCE-4163. consistently set the bind address (Daryn Sharp via bobby)
+
+    MAPREDUCE-4048. NullPointerException exception while accessing the
+    Application Master UI (Devaraj K via bobby)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 0 - 18
hadoop-mapreduce-project/dev-support/test-patch.properties

@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OK_RELEASEAUDIT_WARNINGS=2
-OK_FINDBUGS_WARNINGS=0
-OK_JAVADOC_WARNINGS=0

+ 1 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java

@@ -19,7 +19,6 @@
 package org.apache.hadoop.mapred;
 
 import java.io.IOException;
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -127,10 +126,7 @@ public class TaskAttemptListenerImpl extends CompositeService
       }
 
       server.start();
-      InetSocketAddress listenerAddress = server.getListenerAddress();
-      listenerAddress.getAddress();
-      this.address = NetUtils.createSocketAddr(InetAddress.getLocalHost()
-        .getCanonicalHostName() + ":" + listenerAddress.getPort());
+      this.address = NetUtils.getConnectAddress(server);
     } catch (IOException e) {
       throw new YarnException(e);
     }
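
The TaskAttemptListenerImpl hunk above, and the similar ones in MRClientService, TestContainerLauncher, HistoryClientService, and TestClientRedirect below, replace hand-rolled hostname resolution with NetUtils.getConnectAddress(server), which maps the server's possibly-wildcard listener address to one a client can actually connect to. A plain java.net sketch of the underlying issue, with no Hadoop classes involved:

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

// Sketch only: a socket bound to the wildcard address reports 0.0.0.0,
// which is not a usable connect address; substituting the local host name
// (roughly what NetUtils.getConnectAddress does for an RPC server) fixes that.
public class ConnectAddressSketch {
  public static void main(String[] args) throws Exception {
    ServerSocket ss = new ServerSocket();
    ss.bind(new InetSocketAddress(0));   // wildcard address, ephemeral port
    InetSocketAddress listen = (InetSocketAddress) ss.getLocalSocketAddress();
    InetSocketAddress connect = new InetSocketAddress(
        InetAddress.getLocalHost().getHostName(), listen.getPort());
    System.out.println("listen=" + listen + "  connect=" + connect);
    ss.close();
  }
}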

+ 9 - 9
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java

@@ -90,6 +90,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.ClusterInfo;
 import org.apache.hadoop.yarn.SystemClock;
@@ -130,6 +131,11 @@ public class MRAppMaster extends CompositeService {
 
   private static final Log LOG = LogFactory.getLog(MRAppMaster.class);
 
+  /**
+   * Priority of the MRAppMaster shutdown hook.
+   */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 30;
+
   private Clock clock;
   private final long startTime;
   private final long appSubmitTime;
@@ -990,8 +996,8 @@ public class MRAppMaster extends CompositeService {
           new MRAppMaster(applicationAttemptId, containerId, nodeHostString,
               Integer.parseInt(nodePortString),
               Integer.parseInt(nodeHttpPortString), appSubmitTime);
-      Runtime.getRuntime().addShutdownHook(
-        new MRAppMasterShutdownHook(appMaster));
+      ShutdownHookManager.get().addShutdownHook(
+        new MRAppMasterShutdownHook(appMaster), SHUTDOWN_HOOK_PRIORITY);
       YarnConfiguration conf = new YarnConfiguration(new JobConf());
       conf.addResource(new Path(MRJobConfig.JOB_CONF_FILE));
       String jobUserName = System
@@ -1010,7 +1016,7 @@ public class MRAppMaster extends CompositeService {
 
   // The shutdown hook that runs when a signal is received AND during normal
   // close of the JVM.
-  static class MRAppMasterShutdownHook extends Thread {
+  static class MRAppMasterShutdownHook implements Runnable {
     MRAppMaster appMaster;
     MRAppMasterShutdownHook(MRAppMaster appMaster) {
       this.appMaster = appMaster;
@@ -1028,12 +1034,6 @@ public class MRAppMaster extends CompositeService {
         appMaster.jobHistoryEventHandler.setSignalled(true);
       }
       appMaster.stop();
-      try {
-        //Close all the FileSystem objects
-        FileSystem.closeAll();
-      } catch (IOException ioe) {
-        LOG.warn("Failed to close all FileSystem objects", ioe);
-      }
     }
   }
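
MRAppMaster above, and JobHistoryServer further down, now register Runnable hooks with ShutdownHookManager at an explicit priority instead of adding raw Threads through Runtime.addShutdownHook (MAPREDUCE-4205 in the CHANGES entries below). A minimal plain-Java sketch of the idea; this is not the real org.apache.hadoop.util.ShutdownHookManager, only an illustration of priority-ordered hooks behind a single JVM shutdown hook:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Sketch only: run registered hooks from one JVM shutdown hook,
// highest priority first, and keep going if one of them throws.
public class TinyShutdownHookManager {
  private static final class Hook {
    final Runnable runnable;
    final int priority;
    Hook(Runnable runnable, int priority) {
      this.runnable = runnable;
      this.priority = priority;
    }
  }

  private final List<Hook> hooks = new ArrayList<Hook>();

  public TinyShutdownHookManager() {
    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        synchronized (hooks) {
          hooks.sort(Comparator.comparingInt((Hook h) -> -h.priority));
          for (Hook h : hooks) {
            try {
              h.runnable.run();
            } catch (Throwable t) {
              // a failing hook must not keep the remaining hooks from running
            }
          }
        }
      }
    });
  }

  public void addShutdownHook(Runnable runnable, int priority) {
    synchronized (hooks) {
      hooks.add(new Hook(runnable, priority));
    }
  }
}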
 

+ 2 - 13
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java

@@ -18,9 +18,7 @@
 
 package org.apache.hadoop.mapreduce.v2.app.client;
 
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
 import java.util.Arrays;
 import java.util.Collection;
 
@@ -78,7 +76,6 @@ import org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -116,13 +113,7 @@ public class MRClientService extends AbstractService
   public void start() {
     Configuration conf = getConfig();
     YarnRPC rpc = YarnRPC.create(conf);
-    InetSocketAddress address = NetUtils.createSocketAddr("0.0.0.0:0");
-    InetAddress hostNameResolved = null;
-    try {
-      hostNameResolved = InetAddress.getLocalHost();
-    } catch (UnknownHostException e) {
-      throw new YarnException(e);
-    }
+    InetSocketAddress address = new InetSocketAddress(0);
 
     ClientToAMSecretManager secretManager = null;
     if (UserGroupInformation.isSecurityEnabled()) {
@@ -150,9 +141,7 @@ public class MRClientService extends AbstractService
     }
 
     server.start();
-    this.bindAddress =
-        NetUtils.createSocketAddr(hostNameResolved.getHostAddress()
-            + ":" + server.getPort());
+    this.bindAddress = NetUtils.getConnectAddress(server);
     LOG.info("Instantiated MRClientService at " + this.bindAddress);
     try {
       webApp = WebApps.$for("mapreduce", AppContext.class, appContext, "ws").with(conf).

+ 10 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java

@@ -27,6 +27,8 @@ import java.util.Locale;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
@@ -47,6 +49,8 @@ import com.google.inject.Inject;
  * This class renders the various pages that the web app supports.
  */
 public class AppController extends Controller implements AMParams {
+  private static final Log LOG = LogFactory.getLog(AppController.class);
+  
   protected final App app;
   
   protected AppController(App app, Configuration conf, RequestContext ctx,
@@ -220,6 +224,8 @@ public class AppController extends Controller implements AMParams {
             toString().toLowerCase(Locale.US));
         setTitle(join(tt, " Tasks for ", $(JOB_ID)));
       } catch (Exception e) {
+        LOG.error("Failed to render tasks page with task type : "
+            + $(TASK_TYPE) + " for job id : " + $(JOB_ID), e);
         badRequest(e.getMessage());
       }
     }
@@ -283,6 +289,8 @@ public class AppController extends Controller implements AMParams {
 
         render(attemptsPage());
       } catch (Exception e) {
+        LOG.error("Failed to render attempts page with task type : "
+            + $(TASK_TYPE) + " for job id : " + $(JOB_ID), e);
         badRequest(e.getMessage());
       }
     }
@@ -316,7 +324,8 @@ public class AppController extends Controller implements AMParams {
    */
   void badRequest(String s) {
     setStatus(HttpServletResponse.SC_BAD_REQUEST);
-    setTitle(join("Bad request: ", s));
+    String title = "Bad request: ";
+    setTitle((s != null) ? join(title, s) : title);
   }
 
   /**

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java

@@ -356,7 +356,7 @@ public class TestContainerLauncher {
           // make proxy connect to our local containerManager server
           ContainerManager proxy = (ContainerManager) rpc.getProxy(
               ContainerManager.class,
-              NetUtils.createSocketAddr("localhost:" + server.getPort()), conf);
+              NetUtils.getConnectAddress(server), conf);
           return proxy;
         }
       };

+ 71 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java

@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestAppController {
+
+  private AppController appController;
+  private RequestContext ctx;
+
+  @Before
+  public void setUp() {
+    AppContext context = mock(AppContext.class);
+    when(context.getApplicationID()).thenReturn(
+        Records.newRecord(ApplicationId.class));
+    App app = new App(context);
+    Configuration conf = new Configuration();
+    ctx = mock(RequestContext.class);
+    appController = new AppController(app, conf, ctx);
+  }
+
+  @Test
+  public void testBadRequest() {
+    String message = "test string";
+    appController.badRequest(message);
+    verifyExpectations(message);
+  }
+
+  @Test
+  public void testBadRequestWithNullMessage() {
+    // It should not throw NullPointerException
+    appController.badRequest(null);
+    verifyExpectations(StringUtils.EMPTY);
+  }
+
+  private void verifyExpectations(String message) {
+    verify(ctx).setStatus(400);
+    verify(ctx).set("app.id", "application_0_0000");
+    verify(ctx).set(eq("rm.web"), anyString());
+    verify(ctx).set("title", "Bad request: " + message);
+  }
+}

+ 3 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java

@@ -33,8 +33,9 @@ public class LocalClientProtocolProvider extends ClientProtocolProvider {
 
   @Override
   public ClientProtocol create(Configuration conf) throws IOException {
-    String framework = conf.get(MRConfig.FRAMEWORK_NAME);
-    if (framework != null && !framework.equals(MRConfig.LOCAL_FRAMEWORK_NAME)) {
+    String framework =
+        conf.get(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
+    if (!MRConfig.LOCAL_FRAMEWORK_NAME.equals(framework)) {
       return null;
     }
     String tracker = conf.get(JTConfig.JT_IPC_ADDRESS, "local");

+ 3 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClientGetJob.java

@@ -49,8 +49,9 @@ public class TestJobClientGetJob {
     JobConf conf = new JobConf();
     conf.set("mapreduce.framework.name", "local");
     FileInputFormat.addInputPath(conf, createTempFile("in", "hello"));
-    FileOutputFormat.setOutputPath(conf,
-        new Path(TEST_ROOT_DIR, getClass().getSimpleName()));
+    Path outputDir = new Path(TEST_ROOT_DIR, getClass().getSimpleName());
+    outputDir.getFileSystem(conf).delete(outputDir, true);
+    FileOutputFormat.setOutputPath(conf, outputDir);
     JobClient jc = new JobClient(conf);
     RunningJob runningJob = jc.submitJob(conf);
     assertNotNull("Running job", runningJob);

+ 15 - 32
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java

@@ -19,9 +19,7 @@
 package org.apache.hadoop.mapreduce.v2.hs;
 
 import java.io.IOException;
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
 import java.security.AccessControlException;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
@@ -76,7 +74,6 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.DelegationToken;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
@@ -117,17 +114,10 @@ public class HistoryClientService extends AbstractService {
     Configuration conf = getConfig();
     YarnRPC rpc = YarnRPC.create(conf);
     initializeWebApp(conf);
-    String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS,
-        JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS);
-    InetSocketAddress address = NetUtils.createSocketAddr(serviceAddr,
-      JHAdminConfig.DEFAULT_MR_HISTORY_PORT,
-      JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS);
-    InetAddress hostNameResolved = null;
-    try {
-      hostNameResolved = InetAddress.getLocalHost(); 
-    } catch (UnknownHostException e) {
-      throw new YarnException(e);
-    }
+    InetSocketAddress address = conf.getSocketAddr(
+        JHAdminConfig.MR_HISTORY_ADDRESS,
+        JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
+        JHAdminConfig.DEFAULT_MR_HISTORY_PORT);
 
     server =
         rpc.getServer(HSClientProtocol.class, protocolHandler, address,
@@ -143,31 +133,24 @@ public class HistoryClientService extends AbstractService {
     }
     
     server.start();
-    this.bindAddress =
-        NetUtils.createSocketAddr(hostNameResolved.getHostAddress()
-            + ":" + server.getPort());
+    this.bindAddress = conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_ADDRESS,
+                                              server.getListenerAddress());
     LOG.info("Instantiated MRClientService at " + this.bindAddress);
 
-    if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
-      String resolvedAddress = bindAddress.getHostName() + ":" + bindAddress.getPort();
-      conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, resolvedAddress);
-
-      String hostname = getConfig().get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
-                                        JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
-      hostname = (hostname.contains(":")) ? hostname.substring(0, hostname.indexOf(":")) : hostname;
-      int port = webApp.port();
-      resolvedAddress = hostname + ":" + port;
-      conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, resolvedAddress);
-    }
-
     super.start();
   }
 
   private void initializeWebApp(Configuration conf) {
     webApp = new HsWebApp(history);
-    String bindAddress = conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
-        JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
-    WebApps.$for("jobhistory", HistoryClientService.class, this, "ws").with(conf).at(bindAddress).start(webApp);
+    InetSocketAddress bindAddress = conf.getSocketAddr(
+        JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
+        JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS,
+        JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT);
+    // NOTE: there should be a .at(InetSocketAddress)
+    WebApps.$for("jobhistory", HistoryClientService.class, this, "ws")
+        .with(conf).at(NetUtils.getHostPortString(bindAddress)).start(webApp);
+    conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
+                           webApp.getListenerAddress());
   }
 
   @Override
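
The HistoryClientService hunks above read the RPC and web-app addresses with Configuration.getSocketAddr and write the resolved address back with Configuration.updateConnectAddr, rather than parsing host:port strings by hand. A short usage sketch for getSocketAddr, assuming hadoop-common is on the classpath; the key name and defaults are hypothetical, not JHAdminConfig values:

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;

// Usage sketch only: "example.service.address" is a hypothetical key,
// not a real JHAdminConfig constant.
public class SocketAddrSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Falls back to 0.0.0.0:10020 when the key is unset.
    InetSocketAddress addr =
        conf.getSocketAddr("example.service.address", "0.0.0.0:10020", 10020);
    System.out.println(addr);
  }
}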

+ 10 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -40,6 +41,12 @@ import org.apache.hadoop.yarn.service.CompositeService;
  *
  *****************************************************************/
 public class JobHistoryServer extends CompositeService {
+
+  /**
+   * Priority of the JobHistoryServer shutdown hook.
+   */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 30;
+
   private static final Log LOG = LogFactory.getLog(JobHistoryServer.class);
   private HistoryContext historyContext;
   private HistoryClientService clientService;
@@ -118,8 +125,9 @@ public class JobHistoryServer extends CompositeService {
     StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
     try {
       JobHistoryServer jobHistoryServer = new JobHistoryServer();
-      Runtime.getRuntime().addShutdownHook(
-          new CompositeServiceShutdownHook(jobHistoryServer));
+      ShutdownHookManager.get().addShutdownHook(
+          new CompositeServiceShutdownHook(jobHistoryServer),
+          SHUTDOWN_HOOK_PRIORITY);
       YarnConfiguration conf = new YarnConfiguration(new JobConf());
       jobHistoryServer.init(conf);
       jobHistoryServer.start();

+ 1 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java

@@ -390,9 +390,7 @@ public class TestClientRedirect {
           rpc.getServer(protocol, this, address,
               conf, null, 1);
       server.start();
-      this.bindAddress =
-        NetUtils.createSocketAddr(hostNameResolved.getHostAddress()
-            + ":" + server.getPort());
+      this.bindAddress = NetUtils.getConnectAddress(server);
        super.start();
        amRunning = true;
     }

+ 15 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java

@@ -28,6 +28,7 @@ import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.LocalJobRunner;
 import org.apache.hadoop.mapred.ResourceMgrDelegate;
 import org.apache.hadoop.mapred.YARNRunner;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
@@ -54,17 +55,26 @@ public class TestYarnClientProtocolProvider extends TestCase {
 
     try {
       cluster = new Cluster(conf);
-      fail("Cluster should not be initialized with out any framework name");
-    } catch (IOException e) {
-
+    } catch (Exception e) {
+      throw new Exception(
+          "Failed to initialize a local runner w/o a cluster framework key", e);
     }
-
+    
+    try {
+      assertTrue("client is not a LocalJobRunner",
+          cluster.getClient() instanceof LocalJobRunner);
+    } finally {
+      if (cluster != null) {
+        cluster.close();
+      }
+    }
+    
     try {
       conf = new Configuration();
       conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
       cluster = new Cluster(conf);
       ClientProtocol client = cluster.getClient();
-      assertTrue(client instanceof YARNRunner);
+      assertTrue("client is a YARNRunner", client instanceof YARNRunner);
     } catch (IOException e) {
 
     } finally {

+ 0 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.yarn.api.records;
 
-// TODO NodeState is a clone of RMNodeState made for MR-3353. In a subsequent 
-// patch RMNodeState should be replaced with NodeState
 /**
  * <p>State of a <code>Node</code>.</p>
  */

+ 1 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/CompositeService.java

@@ -107,12 +107,11 @@ public class CompositeService extends AbstractService {
    * JVM Shutdown hook for CompositeService which will stop the give
    * CompositeService gracefully in case of JVM shutdown.
    */
-  public static class CompositeServiceShutdownHook extends Thread {
+  public static class CompositeServiceShutdownHook implements Runnable {
 
     private CompositeService compositeService;
 
     public CompositeServiceShutdownHook(CompositeService compositeService) {
-      super("CompositeServiceShutdownHook for " + compositeService.getName());
       this.compositeService = compositeService;
     }
 

+ 9 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.webapp;
 
 import static com.google.common.base.Preconditions.checkNotNull;
 
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -75,6 +76,14 @@ public abstract class WebApp extends ServletModule {
 
   @Provides public HttpServer httpServer() { return httpServer; }
 
+  /**
+   * Get the address the http server is bound to
+   * @return InetSocketAddress
+   */
+  public InetSocketAddress getListenerAddress() {
+    return checkNotNull(httpServer, "httpServer").getListenerAddress();
+  }
+	
   public int port() {
     return checkNotNull(httpServer, "httpServer").getPort();
   }

+ 4 - 4
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java

@@ -79,11 +79,11 @@ public class JQueryUI extends HtmlBlock {
   @Override
   protected void render(Block html) {
     html.
-      link(join("https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.16/themes/",
-                getTheme(), "/jquery-ui.css")).
+      link(root_url(join("static/jquery/themes-1.8.16/",
+        getTheme(), "/jquery-ui.css"))).
       link(root_url("static/dt-1.7.5/css/jui-dt.css")).
-      script("https://ajax.googleapis.com/ajax/libs/jquery/1.6.4/jquery.min.js").
-      script("https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.16/jquery-ui.min.js").
+      script(root_url("static/jquery/jquery.min-1.6.4.js")).
+      script(root_url("static/jquery/jquery-ui.min-1.8.16.js")).
       script(root_url("static/dt-1.7.5/js/jquery.dataTables.min.js")).
       script(root_url("static/yarn.dt.plugins.js")).
       script(root_url("static/themeswitcher.js")).

Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui.min-1.8.16.js.gz


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery.min-1.6.4.js.gz


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_flat_0_aaaaaa_40x100.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_flat_75_ffffff_40x100.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_glass_55_fbf9ee_1x400.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_glass_65_ffffff_1x400.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_glass_75_dadada_1x400.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_glass_75_e6e6e6_1x400.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_glass_95_fef1ec_1x400.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-bg_highlight-soft_75_cccccc_1x100.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-icons_222222_256x240.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-icons_2e83ff_256x240.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-icons_454545_256x240.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-icons_888888_256x240.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/images/ui-icons_cd0a0a_256x240.png


+ 566 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/base/jquery-ui.css

@@ -0,0 +1,566 @@
+/*
+ * jQuery UI CSS Framework 1.8.16
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Theming/API
+ */
+
+/* Layout helpers
+----------------------------------*/
+.ui-helper-hidden { display: none; }
+.ui-helper-hidden-accessible { position: absolute !important; clip: rect(1px 1px 1px 1px); clip: rect(1px,1px,1px,1px); }
+.ui-helper-reset { margin: 0; padding: 0; border: 0; outline: 0; line-height: 1.3; text-decoration: none; font-size: 100%; list-style: none; }
+.ui-helper-clearfix:after { content: "."; display: block; height: 0; clear: both; visibility: hidden; }
+.ui-helper-clearfix { display: inline-block; }
+/* required comment for clearfix to work in Opera \*/
+* html .ui-helper-clearfix { height:1%; }
+.ui-helper-clearfix { display:block; }
+/* end clearfix */
+.ui-helper-zfix { width: 100%; height: 100%; top: 0; left: 0; position: absolute; opacity: 0; filter:Alpha(Opacity=0); }
+
+
+/* Interaction Cues
+----------------------------------*/
+.ui-state-disabled { cursor: default !important; }
+
+
+/* Icons
+----------------------------------*/
+
+/* states and images */
+.ui-icon { display: block; text-indent: -99999px; overflow: hidden; background-repeat: no-repeat; }
+
+
+/* Misc visuals
+----------------------------------*/
+
+/* Overlays */
+.ui-widget-overlay { position: absolute; top: 0; left: 0; width: 100%; height: 100%; }
+/*
+ * jQuery UI Accordion 1.8.16
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Accordion#theming
+ */
+/* IE/Win - Fix animation bug - #4615 */
+.ui-accordion { width: 100%; }
+.ui-accordion .ui-accordion-header { cursor: pointer; position: relative; margin-top: 1px; zoom: 1; }
+.ui-accordion .ui-accordion-li-fix { display: inline; }
+.ui-accordion .ui-accordion-header-active { border-bottom: 0 !important; }
+.ui-accordion .ui-accordion-header a { display: block; font-size: 1em; padding: .5em .5em .5em .7em; }
+.ui-accordion-icons .ui-accordion-header a { padding-left: 2.2em; }
+.ui-accordion .ui-accordion-header .ui-icon { position: absolute; left: .5em; top: 50%; margin-top: -8px; }
+.ui-accordion .ui-accordion-content { padding: 1em 2.2em; border-top: 0; margin-top: -2px; position: relative; top: 1px; margin-bottom: 2px; overflow: auto; display: none; zoom: 1; }
+.ui-accordion .ui-accordion-content-active { display: block; }
+/*
+ * jQuery UI Autocomplete 1.8.16
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Autocomplete#theming
+ */
+.ui-autocomplete { position: absolute; cursor: default; }	
+
+/* workarounds */
+* html .ui-autocomplete { width:1px; } /* without this, the menu expands to 100% in IE6 */
+
+/*
+ * jQuery UI Menu 1.8.16
+ *
+ * Copyright 2010, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Menu#theming
+ */
+.ui-menu {
+	list-style:none;
+	padding: 2px;
+	margin: 0;
+	display:block;
+	float: left;
+}
+.ui-menu .ui-menu {
+	margin-top: -3px;
+}
+.ui-menu .ui-menu-item {
+	margin:0;
+	padding: 0;
+	zoom: 1;
+	float: left;
+	clear: left;
+	width: 100%;
+}
+.ui-menu .ui-menu-item a {
+	text-decoration:none;
+	display:block;
+	padding:.2em .4em;
+	line-height:1.5;
+	zoom:1;
+}
+.ui-menu .ui-menu-item a.ui-state-hover,
+.ui-menu .ui-menu-item a.ui-state-active {
+	font-weight: normal;
+	margin: -1px;
+}
+/*
+ * jQuery UI Button 1.8.16
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Button#theming
+ */
+.ui-button { display: inline-block; position: relative; padding: 0; margin-right: .1em; text-decoration: none !important; cursor: pointer; text-align: center; zoom: 1; overflow: visible; } /* the overflow property removes extra width in IE */
+.ui-button-icon-only { width: 2.2em; } /* to make room for the icon, a width needs to be set here */
+button.ui-button-icon-only { width: 2.4em; } /* button elements seem to need a little more width */
+.ui-button-icons-only { width: 3.4em; } 
+button.ui-button-icons-only { width: 3.7em; } 
+
+/*button text element */
+.ui-button .ui-button-text { display: block; line-height: 1.4;  }
+.ui-button-text-only .ui-button-text { padding: .4em 1em; }
+.ui-button-icon-only .ui-button-text, .ui-button-icons-only .ui-button-text { padding: .4em; text-indent: -9999999px; }
+.ui-button-text-icon-primary .ui-button-text, .ui-button-text-icons .ui-button-text { padding: .4em 1em .4em 2.1em; }
+.ui-button-text-icon-secondary .ui-button-text, .ui-button-text-icons .ui-button-text { padding: .4em 2.1em .4em 1em; }
+.ui-button-text-icons .ui-button-text { padding-left: 2.1em; padding-right: 2.1em; }
+/* no icon support for input elements, provide padding by default */
+input.ui-button { padding: .4em 1em; }
+
+/*button icon element(s) */
+.ui-button-icon-only .ui-icon, .ui-button-text-icon-primary .ui-icon, .ui-button-text-icon-secondary .ui-icon, .ui-button-text-icons .ui-icon, .ui-button-icons-only .ui-icon { position: absolute; top: 50%; margin-top: -8px; }
+.ui-button-icon-only .ui-icon { left: 50%; margin-left: -8px; }
+.ui-button-text-icon-primary .ui-button-icon-primary, .ui-button-text-icons .ui-button-icon-primary, .ui-button-icons-only .ui-button-icon-primary { left: .5em; }
+.ui-button-text-icon-secondary .ui-button-icon-secondary, .ui-button-text-icons .ui-button-icon-secondary, .ui-button-icons-only .ui-button-icon-secondary { right: .5em; }
+.ui-button-text-icons .ui-button-icon-secondary, .ui-button-icons-only .ui-button-icon-secondary { right: .5em; }
+
+/*button sets*/
+.ui-buttonset { margin-right: 7px; }
+.ui-buttonset .ui-button { margin-left: 0; margin-right: -.3em; }
+
+/* workarounds */
+button.ui-button::-moz-focus-inner { border: 0; padding: 0; } /* reset extra padding in Firefox */
+/*
+ * jQuery UI Datepicker 1.8.16
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Datepicker#theming
+ */
+.ui-datepicker { width: 17em; padding: .2em .2em 0; display: none; }
+.ui-datepicker .ui-datepicker-header { position:relative; padding:.2em 0; }
+.ui-datepicker .ui-datepicker-prev, .ui-datepicker .ui-datepicker-next { position:absolute; top: 2px; width: 1.8em; height: 1.8em; }
+.ui-datepicker .ui-datepicker-prev-hover, .ui-datepicker .ui-datepicker-next-hover { top: 1px; }
+.ui-datepicker .ui-datepicker-prev { left:2px; }
+.ui-datepicker .ui-datepicker-next { right:2px; }
+.ui-datepicker .ui-datepicker-prev-hover { left:1px; }
+.ui-datepicker .ui-datepicker-next-hover { right:1px; }
+.ui-datepicker .ui-datepicker-prev span, .ui-datepicker .ui-datepicker-next span { display: block; position: absolute; left: 50%; margin-left: -8px; top: 50%; margin-top: -8px;  }
+.ui-datepicker .ui-datepicker-title { margin: 0 2.3em; line-height: 1.8em; text-align: center; }
+.ui-datepicker .ui-datepicker-title select { font-size:1em; margin:1px 0; }
+.ui-datepicker select.ui-datepicker-month-year {width: 100%;}
+.ui-datepicker select.ui-datepicker-month, 
+.ui-datepicker select.ui-datepicker-year { width: 49%;}
+.ui-datepicker table {width: 100%; font-size: .9em; border-collapse: collapse; margin:0 0 .4em; }
+.ui-datepicker th { padding: .7em .3em; text-align: center; font-weight: bold; border: 0;  }
+.ui-datepicker td { border: 0; padding: 1px; }
+.ui-datepicker td span, .ui-datepicker td a { display: block; padding: .2em; text-align: right; text-decoration: none; }
+.ui-datepicker .ui-datepicker-buttonpane { background-image: none; margin: .7em 0 0 0; padding:0 .2em; border-left: 0; border-right: 0; border-bottom: 0; }
+.ui-datepicker .ui-datepicker-buttonpane button { float: right; margin: .5em .2em .4em; cursor: pointer; padding: .2em .6em .3em .6em; width:auto; overflow:visible; }
+.ui-datepicker .ui-datepicker-buttonpane button.ui-datepicker-current { float:left; }
+
+/* with multiple calendars */
+.ui-datepicker.ui-datepicker-multi { width:auto; }
+.ui-datepicker-multi .ui-datepicker-group { float:left; }
+.ui-datepicker-multi .ui-datepicker-group table { width:95%; margin:0 auto .4em; }
+.ui-datepicker-multi-2 .ui-datepicker-group { width:50%; }
+.ui-datepicker-multi-3 .ui-datepicker-group { width:33.3%; }
+.ui-datepicker-multi-4 .ui-datepicker-group { width:25%; }
+.ui-datepicker-multi .ui-datepicker-group-last .ui-datepicker-header { border-left-width:0; }
+.ui-datepicker-multi .ui-datepicker-group-middle .ui-datepicker-header { border-left-width:0; }
+.ui-datepicker-multi .ui-datepicker-buttonpane { clear:left; }
+.ui-datepicker-row-break { clear:both; width:100%; font-size:0em; }
+
+/* RTL support */
+.ui-datepicker-rtl { direction: rtl; }
+.ui-datepicker-rtl .ui-datepicker-prev { right: 2px; left: auto; }
+.ui-datepicker-rtl .ui-datepicker-next { left: 2px; right: auto; }
+.ui-datepicker-rtl .ui-datepicker-prev:hover { right: 1px; left: auto; }
+.ui-datepicker-rtl .ui-datepicker-next:hover { left: 1px; right: auto; }
+.ui-datepicker-rtl .ui-datepicker-buttonpane { clear:right; }
+.ui-datepicker-rtl .ui-datepicker-buttonpane button { float: left; }
+.ui-datepicker-rtl .ui-datepicker-buttonpane button.ui-datepicker-current { float:right; }
+.ui-datepicker-rtl .ui-datepicker-group { float:right; }
+.ui-datepicker-rtl .ui-datepicker-group-last .ui-datepicker-header { border-right-width:0; border-left-width:1px; }
+.ui-datepicker-rtl .ui-datepicker-group-middle .ui-datepicker-header { border-right-width:0; border-left-width:1px; }
+
+/* IE6 IFRAME FIX (taken from datepicker 1.5.3 */
+.ui-datepicker-cover {
+    display: none; /*sorry for IE5*/
+    display/**/: block; /*sorry for IE5*/
+    position: absolute; /*must have*/
+    z-index: -1; /*must have*/
+    filter: mask(); /*must have*/
+    top: -4px; /*must have*/
+    left: -4px; /*must have*/
+    width: 200px; /*must have*/
+    height: 200px; /*must have*/
+}/*
+ * jQuery UI Dialog 1.8.16
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Dialog#theming
+ */
+.ui-dialog { position: absolute; padding: .2em; width: 300px; overflow: hidden; }
+.ui-dialog .ui-dialog-titlebar { padding: .4em 1em; position: relative;  }
+.ui-dialog .ui-dialog-title { float: left; margin: .1em 16px .1em 0; } 
+.ui-dialog .ui-dialog-titlebar-close { position: absolute; right: .3em; top: 50%; width: 19px; margin: -10px 0 0 0; padding: 1px; height: 18px; }
+.ui-dialog .ui-dialog-titlebar-close span { display: block; margin: 1px; }
+.ui-dialog .ui-dialog-titlebar-close:hover, .ui-dialog .ui-dialog-titlebar-close:focus { padding: 0; }
+.ui-dialog .ui-dialog-content { position: relative; border: 0; padding: .5em 1em; background: none; overflow: auto; zoom: 1; }
+.ui-dialog .ui-dialog-buttonpane { text-align: left; border-width: 1px 0 0 0; background-image: none; margin: .5em 0 0 0; padding: .3em 1em .5em .4em; }
+.ui-dialog .ui-dialog-buttonpane .ui-dialog-buttonset { float: right; }
+.ui-dialog .ui-dialog-buttonpane button { margin: .5em .4em .5em 0; cursor: pointer; }
+.ui-dialog .ui-resizable-se { width: 14px; height: 14px; right: 3px; bottom: 3px; }
+.ui-draggable .ui-dialog-titlebar { cursor: move; }
+/*
+ * jQuery UI Progressbar 1.8.16
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Progressbar#theming
+ */
+.ui-progressbar { height:2em; text-align: left; }
+.ui-progressbar .ui-progressbar-value {margin: -1px; height:100%; }/*
+ * jQuery UI Resizable 1.8.16
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Resizable#theming
+ */
+.ui-resizable { position: relative;}
+.ui-resizable-handle { position: absolute;font-size: 0.1px;z-index: 99999; display: block; }
+.ui-resizable-disabled .ui-resizable-handle, .ui-resizable-autohide .ui-resizable-handle { display: none; }
+.ui-resizable-n { cursor: n-resize; height: 7px; width: 100%; top: -5px; left: 0; }
+.ui-resizable-s { cursor: s-resize; height: 7px; width: 100%; bottom: -5px; left: 0; }
+.ui-resizable-e { cursor: e-resize; width: 7px; right: -5px; top: 0; height: 100%; }
+.ui-resizable-w { cursor: w-resize; width: 7px; left: -5px; top: 0; height: 100%; }
+.ui-resizable-se { cursor: se-resize; width: 12px; height: 12px; right: 1px; bottom: 1px; }
+.ui-resizable-sw { cursor: sw-resize; width: 9px; height: 9px; left: -5px; bottom: -5px; }
+.ui-resizable-nw { cursor: nw-resize; width: 9px; height: 9px; left: -5px; top: -5px; }
+.ui-resizable-ne { cursor: ne-resize; width: 9px; height: 9px; right: -5px; top: -5px;}/*
+ * jQuery UI Selectable 1.8.16
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Selectable#theming
+ */
+.ui-selectable-helper { position: absolute; z-index: 100; border:1px dotted black; }
+/*
+ * jQuery UI Slider 1.8.16
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Slider#theming
+ */
+.ui-slider { position: relative; text-align: left; }
+.ui-slider .ui-slider-handle { position: absolute; z-index: 2; width: 1.2em; height: 1.2em; cursor: default; }
+.ui-slider .ui-slider-range { position: absolute; z-index: 1; font-size: .7em; display: block; border: 0; background-position: 0 0; }
+
+.ui-slider-horizontal { height: .8em; }
+.ui-slider-horizontal .ui-slider-handle { top: -.3em; margin-left: -.6em; }
+.ui-slider-horizontal .ui-slider-range { top: 0; height: 100%; }
+.ui-slider-horizontal .ui-slider-range-min { left: 0; }
+.ui-slider-horizontal .ui-slider-range-max { right: 0; }
+
+.ui-slider-vertical { width: .8em; height: 100px; }
+.ui-slider-vertical .ui-slider-handle { left: -.3em; margin-left: 0; margin-bottom: -.6em; }
+.ui-slider-vertical .ui-slider-range { left: 0; width: 100%; }
+.ui-slider-vertical .ui-slider-range-min { bottom: 0; }
+.ui-slider-vertical .ui-slider-range-max { top: 0; }/*
+ * jQuery UI Tabs 1.8.16
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Tabs#theming
+ */
+.ui-tabs { position: relative; padding: .2em; zoom: 1; } /* position: relative prevents IE scroll bug (element with position: relative inside container with overflow: auto appear as "fixed") */
+.ui-tabs .ui-tabs-nav { margin: 0; padding: .2em .2em 0; }
+.ui-tabs .ui-tabs-nav li { list-style: none; float: left; position: relative; top: 1px; margin: 0 .2em 1px 0; border-bottom: 0 !important; padding: 0; white-space: nowrap; }
+.ui-tabs .ui-tabs-nav li a { float: left; padding: .5em 1em; text-decoration: none; }
+.ui-tabs .ui-tabs-nav li.ui-tabs-selected { margin-bottom: 0; padding-bottom: 1px; }
+.ui-tabs .ui-tabs-nav li.ui-tabs-selected a, .ui-tabs .ui-tabs-nav li.ui-state-disabled a, .ui-tabs .ui-tabs-nav li.ui-state-processing a { cursor: text; }
+.ui-tabs .ui-tabs-nav li a, .ui-tabs.ui-tabs-collapsible .ui-tabs-nav li.ui-tabs-selected a { cursor: pointer; } /* first selector in group seems obsolete, but required to overcome bug in Opera applying cursor: text overall if defined elsewhere... */
+.ui-tabs .ui-tabs-panel { display: block; border-width: 0; padding: 1em 1.4em; background: none; }
+.ui-tabs .ui-tabs-hide { display: none !important; }
+/*
+ * jQuery UI CSS Framework 1.8.16
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Theming/API
+ *
+ * To view and modify this theme, visit http://jqueryui.com/themeroller/
+ */
+
+
+/* Component containers
+----------------------------------*/
+.ui-widget { font-family: Verdana,Arial,sans-serif/*{ffDefault}*/; font-size: 1.1em/*{fsDefault}*/; }
+.ui-widget .ui-widget { font-size: 1em; }
+.ui-widget input, .ui-widget select, .ui-widget textarea, .ui-widget button { font-family: Verdana,Arial,sans-serif/*{ffDefault}*/; font-size: 1em; }
+.ui-widget-content { border: 1px solid #aaaaaa/*{borderColorContent}*/; background: #ffffff/*{bgColorContent}*/ url(images/ui-bg_flat_75_ffffff_40x100.png)/*{bgImgUrlContent}*/ 50%/*{bgContentXPos}*/ 50%/*{bgContentYPos}*/ repeat-x/*{bgContentRepeat}*/; color: #222222/*{fcContent}*/; }
+.ui-widget-content a { color: #222222/*{fcContent}*/; }
+.ui-widget-header { border: 1px solid #aaaaaa/*{borderColorHeader}*/; background: #cccccc/*{bgColorHeader}*/ url(images/ui-bg_highlight-soft_75_cccccc_1x100.png)/*{bgImgUrlHeader}*/ 50%/*{bgHeaderXPos}*/ 50%/*{bgHeaderYPos}*/ repeat-x/*{bgHeaderRepeat}*/; color: #222222/*{fcHeader}*/; font-weight: bold; }
+.ui-widget-header a { color: #222222/*{fcHeader}*/; }
+
+/* Interaction states
+----------------------------------*/
+.ui-state-default, .ui-widget-content .ui-state-default, .ui-widget-header .ui-state-default { border: 1px solid #d3d3d3/*{borderColorDefault}*/; background: #e6e6e6/*{bgColorDefault}*/ url(images/ui-bg_glass_75_e6e6e6_1x400.png)/*{bgImgUrlDefault}*/ 50%/*{bgDefaultXPos}*/ 50%/*{bgDefaultYPos}*/ repeat-x/*{bgDefaultRepeat}*/; font-weight: normal/*{fwDefault}*/; color: #555555/*{fcDefault}*/; }
+.ui-state-default a, .ui-state-default a:link, .ui-state-default a:visited { color: #555555/*{fcDefault}*/; text-decoration: none; }
+.ui-state-hover, .ui-widget-content .ui-state-hover, .ui-widget-header .ui-state-hover, .ui-state-focus, .ui-widget-content .ui-state-focus, .ui-widget-header .ui-state-focus { border: 1px solid #999999/*{borderColorHover}*/; background: #dadada/*{bgColorHover}*/ url(images/ui-bg_glass_75_dadada_1x400.png)/*{bgImgUrlHover}*/ 50%/*{bgHoverXPos}*/ 50%/*{bgHoverYPos}*/ repeat-x/*{bgHoverRepeat}*/; font-weight: normal/*{fwDefault}*/; color: #212121/*{fcHover}*/; }
+.ui-state-hover a, .ui-state-hover a:hover { color: #212121/*{fcHover}*/; text-decoration: none; }
+.ui-state-active, .ui-widget-content .ui-state-active, .ui-widget-header .ui-state-active { border: 1px solid #aaaaaa/*{borderColorActive}*/; background: #ffffff/*{bgColorActive}*/ url(images/ui-bg_glass_65_ffffff_1x400.png)/*{bgImgUrlActive}*/ 50%/*{bgActiveXPos}*/ 50%/*{bgActiveYPos}*/ repeat-x/*{bgActiveRepeat}*/; font-weight: normal/*{fwDefault}*/; color: #212121/*{fcActive}*/; }
+.ui-state-active a, .ui-state-active a:link, .ui-state-active a:visited { color: #212121/*{fcActive}*/; text-decoration: none; }
+.ui-widget :active { outline: none; }
+
+/* Interaction Cues
+----------------------------------*/
+.ui-state-highlight, .ui-widget-content .ui-state-highlight, .ui-widget-header .ui-state-highlight  {border: 1px solid #fcefa1/*{borderColorHighlight}*/; background: #fbf9ee/*{bgColorHighlight}*/ url(images/ui-bg_glass_55_fbf9ee_1x400.png)/*{bgImgUrlHighlight}*/ 50%/*{bgHighlightXPos}*/ 50%/*{bgHighlightYPos}*/ repeat-x/*{bgHighlightRepeat}*/; color: #363636/*{fcHighlight}*/; }
+.ui-state-highlight a, .ui-widget-content .ui-state-highlight a,.ui-widget-header .ui-state-highlight a { color: #363636/*{fcHighlight}*/; }
+.ui-state-error, .ui-widget-content .ui-state-error, .ui-widget-header .ui-state-error {border: 1px solid #cd0a0a/*{borderColorError}*/; background: #fef1ec/*{bgColorError}*/ url(images/ui-bg_glass_95_fef1ec_1x400.png)/*{bgImgUrlError}*/ 50%/*{bgErrorXPos}*/ 50%/*{bgErrorYPos}*/ repeat-x/*{bgErrorRepeat}*/; color: #cd0a0a/*{fcError}*/; }
+.ui-state-error a, .ui-widget-content .ui-state-error a, .ui-widget-header .ui-state-error a { color: #cd0a0a/*{fcError}*/; }
+.ui-state-error-text, .ui-widget-content .ui-state-error-text, .ui-widget-header .ui-state-error-text { color: #cd0a0a/*{fcError}*/; }
+.ui-priority-primary, .ui-widget-content .ui-priority-primary, .ui-widget-header .ui-priority-primary { font-weight: bold; }
+.ui-priority-secondary, .ui-widget-content .ui-priority-secondary,  .ui-widget-header .ui-priority-secondary { opacity: .7; filter:Alpha(Opacity=70); font-weight: normal; }
+.ui-state-disabled, .ui-widget-content .ui-state-disabled, .ui-widget-header .ui-state-disabled { opacity: .35; filter:Alpha(Opacity=35); background-image: none; }
+
+/* Icons
+----------------------------------*/
+
+/* states and images */
+.ui-icon { width: 16px; height: 16px; background-image: url(images/ui-icons_222222_256x240.png)/*{iconsContent}*/; }
+.ui-widget-content .ui-icon {background-image: url(images/ui-icons_222222_256x240.png)/*{iconsContent}*/; }
+.ui-widget-header .ui-icon {background-image: url(images/ui-icons_222222_256x240.png)/*{iconsHeader}*/; }
+.ui-state-default .ui-icon { background-image: url(images/ui-icons_888888_256x240.png)/*{iconsDefault}*/; }
+.ui-state-hover .ui-icon, .ui-state-focus .ui-icon {background-image: url(images/ui-icons_454545_256x240.png)/*{iconsHover}*/; }
+.ui-state-active .ui-icon {background-image: url(images/ui-icons_454545_256x240.png)/*{iconsActive}*/; }
+.ui-state-highlight .ui-icon {background-image: url(images/ui-icons_2e83ff_256x240.png)/*{iconsHighlight}*/; }
+.ui-state-error .ui-icon, .ui-state-error-text .ui-icon {background-image: url(images/ui-icons_cd0a0a_256x240.png)/*{iconsError}*/; }
+
+/* positioning */
+.ui-icon-carat-1-n { background-position: 0 0; }
+.ui-icon-carat-1-ne { background-position: -16px 0; }
+.ui-icon-carat-1-e { background-position: -32px 0; }
+.ui-icon-carat-1-se { background-position: -48px 0; }
+.ui-icon-carat-1-s { background-position: -64px 0; }
+.ui-icon-carat-1-sw { background-position: -80px 0; }
+.ui-icon-carat-1-w { background-position: -96px 0; }
+.ui-icon-carat-1-nw { background-position: -112px 0; }
+.ui-icon-carat-2-n-s { background-position: -128px 0; }
+.ui-icon-carat-2-e-w { background-position: -144px 0; }
+.ui-icon-triangle-1-n { background-position: 0 -16px; }
+.ui-icon-triangle-1-ne { background-position: -16px -16px; }
+.ui-icon-triangle-1-e { background-position: -32px -16px; }
+.ui-icon-triangle-1-se { background-position: -48px -16px; }
+.ui-icon-triangle-1-s { background-position: -64px -16px; }
+.ui-icon-triangle-1-sw { background-position: -80px -16px; }
+.ui-icon-triangle-1-w { background-position: -96px -16px; }
+.ui-icon-triangle-1-nw { background-position: -112px -16px; }
+.ui-icon-triangle-2-n-s { background-position: -128px -16px; }
+.ui-icon-triangle-2-e-w { background-position: -144px -16px; }
+.ui-icon-arrow-1-n { background-position: 0 -32px; }
+.ui-icon-arrow-1-ne { background-position: -16px -32px; }
+.ui-icon-arrow-1-e { background-position: -32px -32px; }
+.ui-icon-arrow-1-se { background-position: -48px -32px; }
+.ui-icon-arrow-1-s { background-position: -64px -32px; }
+.ui-icon-arrow-1-sw { background-position: -80px -32px; }
+.ui-icon-arrow-1-w { background-position: -96px -32px; }
+.ui-icon-arrow-1-nw { background-position: -112px -32px; }
+.ui-icon-arrow-2-n-s { background-position: -128px -32px; }
+.ui-icon-arrow-2-ne-sw { background-position: -144px -32px; }
+.ui-icon-arrow-2-e-w { background-position: -160px -32px; }
+.ui-icon-arrow-2-se-nw { background-position: -176px -32px; }
+.ui-icon-arrowstop-1-n { background-position: -192px -32px; }
+.ui-icon-arrowstop-1-e { background-position: -208px -32px; }
+.ui-icon-arrowstop-1-s { background-position: -224px -32px; }
+.ui-icon-arrowstop-1-w { background-position: -240px -32px; }
+.ui-icon-arrowthick-1-n { background-position: 0 -48px; }
+.ui-icon-arrowthick-1-ne { background-position: -16px -48px; }
+.ui-icon-arrowthick-1-e { background-position: -32px -48px; }
+.ui-icon-arrowthick-1-se { background-position: -48px -48px; }
+.ui-icon-arrowthick-1-s { background-position: -64px -48px; }
+.ui-icon-arrowthick-1-sw { background-position: -80px -48px; }
+.ui-icon-arrowthick-1-w { background-position: -96px -48px; }
+.ui-icon-arrowthick-1-nw { background-position: -112px -48px; }
+.ui-icon-arrowthick-2-n-s { background-position: -128px -48px; }
+.ui-icon-arrowthick-2-ne-sw { background-position: -144px -48px; }
+.ui-icon-arrowthick-2-e-w { background-position: -160px -48px; }
+.ui-icon-arrowthick-2-se-nw { background-position: -176px -48px; }
+.ui-icon-arrowthickstop-1-n { background-position: -192px -48px; }
+.ui-icon-arrowthickstop-1-e { background-position: -208px -48px; }
+.ui-icon-arrowthickstop-1-s { background-position: -224px -48px; }
+.ui-icon-arrowthickstop-1-w { background-position: -240px -48px; }
+.ui-icon-arrowreturnthick-1-w { background-position: 0 -64px; }
+.ui-icon-arrowreturnthick-1-n { background-position: -16px -64px; }
+.ui-icon-arrowreturnthick-1-e { background-position: -32px -64px; }
+.ui-icon-arrowreturnthick-1-s { background-position: -48px -64px; }
+.ui-icon-arrowreturn-1-w { background-position: -64px -64px; }
+.ui-icon-arrowreturn-1-n { background-position: -80px -64px; }
+.ui-icon-arrowreturn-1-e { background-position: -96px -64px; }
+.ui-icon-arrowreturn-1-s { background-position: -112px -64px; }
+.ui-icon-arrowrefresh-1-w { background-position: -128px -64px; }
+.ui-icon-arrowrefresh-1-n { background-position: -144px -64px; }
+.ui-icon-arrowrefresh-1-e { background-position: -160px -64px; }
+.ui-icon-arrowrefresh-1-s { background-position: -176px -64px; }
+.ui-icon-arrow-4 { background-position: 0 -80px; }
+.ui-icon-arrow-4-diag { background-position: -16px -80px; }
+.ui-icon-extlink { background-position: -32px -80px; }
+.ui-icon-newwin { background-position: -48px -80px; }
+.ui-icon-refresh { background-position: -64px -80px; }
+.ui-icon-shuffle { background-position: -80px -80px; }
+.ui-icon-transfer-e-w { background-position: -96px -80px; }
+.ui-icon-transferthick-e-w { background-position: -112px -80px; }
+.ui-icon-folder-collapsed { background-position: 0 -96px; }
+.ui-icon-folder-open { background-position: -16px -96px; }
+.ui-icon-document { background-position: -32px -96px; }
+.ui-icon-document-b { background-position: -48px -96px; }
+.ui-icon-note { background-position: -64px -96px; }
+.ui-icon-mail-closed { background-position: -80px -96px; }
+.ui-icon-mail-open { background-position: -96px -96px; }
+.ui-icon-suitcase { background-position: -112px -96px; }
+.ui-icon-comment { background-position: -128px -96px; }
+.ui-icon-person { background-position: -144px -96px; }
+.ui-icon-print { background-position: -160px -96px; }
+.ui-icon-trash { background-position: -176px -96px; }
+.ui-icon-locked { background-position: -192px -96px; }
+.ui-icon-unlocked { background-position: -208px -96px; }
+.ui-icon-bookmark { background-position: -224px -96px; }
+.ui-icon-tag { background-position: -240px -96px; }
+.ui-icon-home { background-position: 0 -112px; }
+.ui-icon-flag { background-position: -16px -112px; }
+.ui-icon-calendar { background-position: -32px -112px; }
+.ui-icon-cart { background-position: -48px -112px; }
+.ui-icon-pencil { background-position: -64px -112px; }
+.ui-icon-clock { background-position: -80px -112px; }
+.ui-icon-disk { background-position: -96px -112px; }
+.ui-icon-calculator { background-position: -112px -112px; }
+.ui-icon-zoomin { background-position: -128px -112px; }
+.ui-icon-zoomout { background-position: -144px -112px; }
+.ui-icon-search { background-position: -160px -112px; }
+.ui-icon-wrench { background-position: -176px -112px; }
+.ui-icon-gear { background-position: -192px -112px; }
+.ui-icon-heart { background-position: -208px -112px; }
+.ui-icon-star { background-position: -224px -112px; }
+.ui-icon-link { background-position: -240px -112px; }
+.ui-icon-cancel { background-position: 0 -128px; }
+.ui-icon-plus { background-position: -16px -128px; }
+.ui-icon-plusthick { background-position: -32px -128px; }
+.ui-icon-minus { background-position: -48px -128px; }
+.ui-icon-minusthick { background-position: -64px -128px; }
+.ui-icon-close { background-position: -80px -128px; }
+.ui-icon-closethick { background-position: -96px -128px; }
+.ui-icon-key { background-position: -112px -128px; }
+.ui-icon-lightbulb { background-position: -128px -128px; }
+.ui-icon-scissors { background-position: -144px -128px; }
+.ui-icon-clipboard { background-position: -160px -128px; }
+.ui-icon-copy { background-position: -176px -128px; }
+.ui-icon-contact { background-position: -192px -128px; }
+.ui-icon-image { background-position: -208px -128px; }
+.ui-icon-video { background-position: -224px -128px; }
+.ui-icon-script { background-position: -240px -128px; }
+.ui-icon-alert { background-position: 0 -144px; }
+.ui-icon-info { background-position: -16px -144px; }
+.ui-icon-notice { background-position: -32px -144px; }
+.ui-icon-help { background-position: -48px -144px; }
+.ui-icon-check { background-position: -64px -144px; }
+.ui-icon-bullet { background-position: -80px -144px; }
+.ui-icon-radio-off { background-position: -96px -144px; }
+.ui-icon-radio-on { background-position: -112px -144px; }
+.ui-icon-pin-w { background-position: -128px -144px; }
+.ui-icon-pin-s { background-position: -144px -144px; }
+.ui-icon-play { background-position: 0 -160px; }
+.ui-icon-pause { background-position: -16px -160px; }
+.ui-icon-seek-next { background-position: -32px -160px; }
+.ui-icon-seek-prev { background-position: -48px -160px; }
+.ui-icon-seek-end { background-position: -64px -160px; }
+.ui-icon-seek-start { background-position: -80px -160px; }
+/* ui-icon-seek-first is deprecated, use ui-icon-seek-start instead */
+.ui-icon-seek-first { background-position: -80px -160px; }
+.ui-icon-stop { background-position: -96px -160px; }
+.ui-icon-eject { background-position: -112px -160px; }
+.ui-icon-volume-off { background-position: -128px -160px; }
+.ui-icon-volume-on { background-position: -144px -160px; }
+.ui-icon-power { background-position: 0 -176px; }
+.ui-icon-signal-diag { background-position: -16px -176px; }
+.ui-icon-signal { background-position: -32px -176px; }
+.ui-icon-battery-0 { background-position: -48px -176px; }
+.ui-icon-battery-1 { background-position: -64px -176px; }
+.ui-icon-battery-2 { background-position: -80px -176px; }
+.ui-icon-battery-3 { background-position: -96px -176px; }
+.ui-icon-circle-plus { background-position: 0 -192px; }
+.ui-icon-circle-minus { background-position: -16px -192px; }
+.ui-icon-circle-close { background-position: -32px -192px; }
+.ui-icon-circle-triangle-e { background-position: -48px -192px; }
+.ui-icon-circle-triangle-s { background-position: -64px -192px; }
+.ui-icon-circle-triangle-w { background-position: -80px -192px; }
+.ui-icon-circle-triangle-n { background-position: -96px -192px; }
+.ui-icon-circle-arrow-e { background-position: -112px -192px; }
+.ui-icon-circle-arrow-s { background-position: -128px -192px; }
+.ui-icon-circle-arrow-w { background-position: -144px -192px; }
+.ui-icon-circle-arrow-n { background-position: -160px -192px; }
+.ui-icon-circle-zoomin { background-position: -176px -192px; }
+.ui-icon-circle-zoomout { background-position: -192px -192px; }
+.ui-icon-circle-check { background-position: -208px -192px; }
+.ui-icon-circlesmall-plus { background-position: 0 -208px; }
+.ui-icon-circlesmall-minus { background-position: -16px -208px; }
+.ui-icon-circlesmall-close { background-position: -32px -208px; }
+.ui-icon-squaresmall-plus { background-position: -48px -208px; }
+.ui-icon-squaresmall-minus { background-position: -64px -208px; }
+.ui-icon-squaresmall-close { background-position: -80px -208px; }
+.ui-icon-grip-dotted-vertical { background-position: 0 -224px; }
+.ui-icon-grip-dotted-horizontal { background-position: -16px -224px; }
+.ui-icon-grip-solid-vertical { background-position: -32px -224px; }
+.ui-icon-grip-solid-horizontal { background-position: -48px -224px; }
+.ui-icon-gripsmall-diagonal-se { background-position: -64px -224px; }
+.ui-icon-grip-diagonal-se { background-position: -80px -224px; }
+
+
+/* Misc visuals
+----------------------------------*/
+
+/* Corner radius */
+.ui-corner-all, .ui-corner-top, .ui-corner-left, .ui-corner-tl { -moz-border-radius-topleft: 4px/*{cornerRadius}*/; -webkit-border-top-left-radius: 4px/*{cornerRadius}*/; -khtml-border-top-left-radius: 4px/*{cornerRadius}*/; border-top-left-radius: 4px/*{cornerRadius}*/; }
+.ui-corner-all, .ui-corner-top, .ui-corner-right, .ui-corner-tr { -moz-border-radius-topright: 4px/*{cornerRadius}*/; -webkit-border-top-right-radius: 4px/*{cornerRadius}*/; -khtml-border-top-right-radius: 4px/*{cornerRadius}*/; border-top-right-radius: 4px/*{cornerRadius}*/; }
+.ui-corner-all, .ui-corner-bottom, .ui-corner-left, .ui-corner-bl { -moz-border-radius-bottomleft: 4px/*{cornerRadius}*/; -webkit-border-bottom-left-radius: 4px/*{cornerRadius}*/; -khtml-border-bottom-left-radius: 4px/*{cornerRadius}*/; border-bottom-left-radius: 4px/*{cornerRadius}*/; }
+.ui-corner-all, .ui-corner-bottom, .ui-corner-right, .ui-corner-br { -moz-border-radius-bottomright: 4px/*{cornerRadius}*/; -webkit-border-bottom-right-radius: 4px/*{cornerRadius}*/; -khtml-border-bottom-right-radius: 4px/*{cornerRadius}*/; border-bottom-right-radius: 4px/*{cornerRadius}*/; }
+
+/* Overlays */
+.ui-widget-overlay { background: #aaaaaa/*{bgColorOverlay}*/ url(images/ui-bg_flat_0_aaaaaa_40x100.png)/*{bgImgUrlOverlay}*/ 50%/*{bgOverlayXPos}*/ 50%/*{bgOverlayYPos}*/ repeat-x/*{bgOverlayRepeat}*/; opacity: .3;filter:Alpha(Opacity=30)/*{opacityOverlay}*/; }
+.ui-widget-shadow { margin: -8px/*{offsetTopShadow}*/ 0 0 -8px/*{offsetLeftShadow}*/; padding: 8px/*{thicknessShadow}*/; background: #aaaaaa/*{bgColorShadow}*/ url(images/ui-bg_flat_0_aaaaaa_40x100.png)/*{bgImgUrlShadow}*/ 50%/*{bgShadowXPos}*/ 50%/*{bgShadowYPos}*/ repeat-x/*{bgShadowRepeat}*/; opacity: .3;filter:Alpha(Opacity=30)/*{opacityShadow}*/; -moz-border-radius: 8px/*{cornerRadiusShadow}*/; -khtml-border-radius: 8px/*{cornerRadiusShadow}*/; -webkit-border-radius: 8px/*{cornerRadiusShadow}*/; border-radius: 8px/*{cornerRadiusShadow}*/; }

Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/black-tie/images/ui-bg_diagonals-thick_8_333333_40x40.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/black-tie/images/ui-bg_flat_65_ffffff_40x100.png


Binary
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/themes-1.8.16/black-tie/images/ui-bg_glass_40_111111_1x400.png


Some files were not shown because too many files changed in this diff