Browse Source

Merge r1432789 through r1437840 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1437843 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 12 years ago
parent
commit
74d1e5c302
100 changed files with 1767 additions and 653 deletions
  1. 2 2
      dev-support/test-patch.sh
  2. 60 5
      hadoop-common-project/hadoop-common/CHANGES.txt
  3. 0 67
      hadoop-common-project/hadoop-common/dev-support/saveVersion.sh
  4. 61 17
      hadoop-common-project/hadoop-common/pom.xml
  5. 17 0
      hadoop-common-project/hadoop-common/src/config.h.cmake
  6. 4 4
      hadoop-common-project/hadoop-common/src/main/bin/hadoop
  7. 219 0
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
  8. 0 16
      hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/service_level_auth.xml
  9. 0 74
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/HadoopVersionAnnotation.java
  10. 14 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  11. 5 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  12. 76 11
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
  13. 1 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  14. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
  15. 15 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  16. 20 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
  17. 27 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
  18. 5 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  19. 62 28
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
  20. 13 0
      hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
  21. 25 0
      hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
  22. 18 2
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  23. 17 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java
  24. 100 31
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
  25. 29 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
  26. 12 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java
  27. 15 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
  28. 20 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
  29. 13 0
      hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
  30. 0 3
      hadoop-common-project/pom.xml
  31. 0 3
      hadoop-dist/pom.xml
  32. 2 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
  33. 17 1
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java
  34. 26 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
  35. 47 2
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  36. 27 0
      hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt
  37. 6 0
      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  38. 17 0
      hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake
  39. 5 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml
  40. 126 38
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  41. 12 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
  42. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
  43. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
  44. 4 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
  45. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
  46. 7 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
  47. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
  48. 89 81
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  49. 42 23
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  50. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
  51. 12 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
  52. 9 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java
  53. 14 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
  54. 12 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
  55. 6 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java
  56. 5 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java
  57. 12 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java
  58. 11 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java
  59. 0 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolProtocolBuffers/overview.html
  60. 9 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
  61. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
  62. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
  63. 41 22
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
  64. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  65. 4 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  66. 17 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java
  67. 1 12
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
  68. 12 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  69. 8 31
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
  70. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
  71. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
  72. 13 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
  73. 13 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
  74. 0 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj
  75. 1 15
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  76. 6 6
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
  77. 10 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
  78. 3 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
  79. 15 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
  80. 18 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java
  81. 13 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
  82. 0 3
      hadoop-hdfs-project/pom.xml
  83. 45 4
      hadoop-mapreduce-project/CHANGES.txt
  84. 13 0
      hadoop-mapreduce-project/conf/mapred-site.xml.template
  85. 1 1
      hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
  86. 2 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
  87. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
  88. 2 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
  89. 56 23
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
  90. 13 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
  91. 7 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java
  92. 2 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
  93. 12 6
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java
  94. 2 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
  95. 17 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
  96. 17 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java
  97. 4 5
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
  98. 6 6
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java
  99. 5 8
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java
  100. 1 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java

+ 2 - 2
dev-support/test-patch.sh

@@ -980,12 +980,12 @@ fi
 (( RESULT = RESULT + $JAVAC_RET ))
 checkJavadocWarnings
 (( RESULT = RESULT + $? ))
-checkEclipseGeneration
-(( RESULT = RESULT + $? ))
 ### Checkstyle not implemented yet
 #checkStyle
 #(( RESULT = RESULT + $? ))
 buildAndInstall
+checkEclipseGeneration
+(( RESULT = RESULT + $? ))
 checkFindbugsWarnings
 (( RESULT = RESULT + $? ))
 checkReleaseAuditWarnings

+ 60 - 5
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -146,6 +146,9 @@ Trunk (Unreleased)
     HADOOP-9162. Add utility to check native library availability.
     (Binglin Chang via suresh)

+    HADOOP-8924. Add maven plugin alternative to shell script to save
+    package-info.java. (Chris Nauroth via suresh)
+
   BUG FIXES

     HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)
@@ -308,10 +311,13 @@ Trunk (Unreleased)
     HADOOP-9131. Turn off TestLocalFileSystem#testListStatusWithColons on
     Windows. (Chris Nauroth via suresh)

-    HADOOP-8957 AbstractFileSystem#IsValidName should be overridden for
+    HADOOP-8957. AbstractFileSystem#IsValidName should be overridden for
     embedded file systems like ViewFs (Chris Nauroth via Sanjay Radia)

-    HADOOP-9139 improve killKdc.sh (Ivan A. Veselovsky via bobby)
+    HADOOP-9139. improve killKdc.sh (Ivan A. Veselovsky via bobby)
+
+    HADOOP-9202. test-patch.sh fails during mvn eclipse:eclipse if patch adds
+    a new module to the build (Chris Nauroth via bobby)

   OPTIMIZATIONS

@@ -323,6 +329,8 @@ Release 2.0.3-alpha - Unreleased

   INCOMPATIBLE CHANGES

+    HADOOP-8999. SASL negotiation is flawed (daryn)
+
   NEW FEATURES

     HADOOP-8597. Permit FsShell's text command to read Avro files.
@@ -433,6 +441,18 @@ Release 2.0.3-alpha - Unreleased
     HADOOP-9192. Move token related request/response messages to common.
     (suresh)

+    HADOOP-8712. Change default hadoop.security.group.mapping to
+    JniBasedUnixGroupsNetgroupMappingWithFallback (Robert Parker via todd)
+
+    HADOOP-9106. Allow configuration of IPC connect timeout.
+    (Rober Parker via suresh)
+
+    HADOOP-9216. CompressionCodecFactory#getCodecClasses should trim the
+    result of parsing by Configuration. (Tsuyoshi Ozawa via todd)
+
+    HADOOP-9231. Parametrize staging URL for the uniformity of
+    distributionManagement. (Konstantin Boudnik via suresh)
+
   OPTIMIZATIONS

     HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
@@ -493,8 +513,6 @@ Release 2.0.3-alpha - Unreleased

     HADOOP-7115. Add a cache for getpwuid_r and getpwgid_r calls (tucu)

-    HADOOP-8999. SASL negotiation is flawed (daryn)
-
     HADOOP-6607. Add different variants of non caching HTTP headers. (tucu)

     HADOOP-9049. DelegationTokenRenewer needs to be Singleton and FileSystems
@@ -537,6 +555,23 @@ Release 2.0.3-alpha - Unreleased

     HADOOP-9183. Potential deadlock in ActiveStandbyElector. (tomwhite)

+    HADOOP-9203. RPCCallBenchmark should find a random available port.
+    (Andrew Purtell via suresh)
+
+    HADOOP-9178. src/main/conf is missing hadoop-policy.xml.
+    (Sandy Ryza via eli)
+
+    HADOOP-8816. HTTP Error 413 full HEAD if using kerberos authentication. 
+    (moritzmoeller via tucu)
+    
+    HADOOP-9212. Potential deadlock in FileSystem.Cache/IPC/UGI. (tomwhite)
+
+    HADOOP-9193. hadoop script can inadvertently expand wildcard arguments
+    when delegating to hdfs script. (Andy Isaacson via todd)
+
+    HADOOP-9215. when using cmake-2.6, libhadoop.so doesn't get created
+    (only libhadoop.so.1.0.0) (Colin Patrick McCabe via todd)
+
 Release 2.0.2-alpha - 2012-09-07

   INCOMPATIBLE CHANGES
@@ -1227,6 +1262,21 @@ Release 2.0.0-alpha - 05-23-2012
     HADOOP-8655. Fix TextInputFormat for large deliminators. (Gelesh via
     bobby)

+Release 0.23.7 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HADOOP-8849. FileUtil#fullyDelete should grant the target directories +rwx
+    permissions (Ivan A. Veselovsky via bobby)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 0.23.6 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -1234,6 +1284,8 @@ Release 0.23.6 - UNRELEASED
   NEW FEATURES

   IMPROVEMENTS
+    HADOOP-9217. Print thread dumps when hadoop-common tests fail.
+    (Andrey Klochkov via suresh)

   OPTIMIZATIONS

@@ -1250,7 +1302,10 @@ Release 0.23.6 - UNRELEASED

     HADOOP-9105. FsShell -moveFromLocal erroneously fails (daryn via bobby)

-Release 0.23.5 - UNRELEASED
+    HADOOP-9097. Maven RAT plugin is not checking all source files (tgraves)
+
+Release 0.23.5 - 2012-11-28
+

   INCOMPATIBLE CHANGES


+ 0 - 67
hadoop-common-project/hadoop-common/dev-support/saveVersion.sh

@@ -1,67 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# This file is used to generate the package-info.java class that
-# records the version, revision, branch, user, timestamp, and url
-unset LANG
-unset LC_CTYPE
-unset LC_TIME
-version=$1
-build_dir=$2
-user=`whoami | tr '\n\r' '\n'`
-date=`date`
-cwd=`pwd`
-if git rev-parse HEAD 2>/dev/null > /dev/null ; then
-  revision=`git log -1 --pretty=format:"%H"`
-  hostname=`hostname`
-  branch=`git branch | sed -n -e 's/^* //p'`
-  url="git://${hostname}${cwd}"
-elif [ -d .svn ]; then
-  revision=`svn info | sed -n -e 's/Last Changed Rev: \(.*\)/\1/p'`
-  url=`svn info | sed -n -e 's/^URL: \(.*\)/\1/p'`
-  # Get canonical branch (branches/X, tags/X, or trunk)
-  branch=`echo $url | sed -n -e 's,.*\(branches/.*\)$,\1,p' \
-                             -e 's,.*\(tags/.*\)$,\1,p' \
-                             -e 's,.*trunk$,trunk,p'`
-else
-  revision="Unknown"
-  branch="Unknown"
-  url="file://$cwd"
-fi
-
-which md5sum > /dev/null
-if [ "$?" = "0" ] ; then
-  srcChecksum=`find src/main/java -name '*.java' | LC_ALL=C sort | xargs md5sum | md5sum | cut -d ' ' -f 1`
-else
-  srcChecksum="Not Available"
-fi
-
-mkdir -p $build_dir/org/apache/hadoop
-cat << EOF | \
-  sed -e "s/VERSION/$version/" -e "s/USER/$user/" -e "s/DATE/$date/" \
-      -e "s|URL|$url|" -e "s/REV/$revision/" \
-      -e "s|BRANCH|$branch|" -e "s/SRCCHECKSUM/$srcChecksum/" \
-      > $build_dir/org/apache/hadoop/package-info.java
-/*
- * Generated by src/saveVersion.sh
- */
-@HadoopVersionAnnotation(version="VERSION", revision="REV", branch="BRANCH",
-                         user="USER", date="DATE", url="URL",
-                         srcChecksum="SRCCHECKSUM")
-package org.apache.hadoop;
-EOF

+ 61 - 17
hadoop-common-project/hadoop-common/pom.xml

@@ -244,7 +244,51 @@
   </dependencies>

   <build>
+    <!--
+    Include all files in src/main/resources.  By default, do not apply property
+    substitution (filtering=false), but do apply property substitution to
+    common-version-info.properties (filtering=true).  This will substitute the
+    version information correctly, but prevent Maven from altering other files
+    like core-default.xml.
+    -->
+    <resources>
+      <resource>
+        <directory>${basedir}/src/main/resources</directory>
+        <excludes>
+          <exclude>common-version-info.properties</exclude>
+        </excludes>
+        <filtering>false</filtering>
+      </resource>
+      <resource>
+        <directory>${basedir}/src/main/resources</directory>
+        <includes>
+          <include>common-version-info.properties</include>
+        </includes>
+        <filtering>true</filtering>
+      </resource>
+    </resources>
     <plugins>
+      <plugin>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-maven-plugins</artifactId>
+        <executions>
+          <execution>
+            <id>version-info</id>
+            <goals>
+              <goal>version-info</goal>
+            </goals>
+            <configuration>
+              <source>
+                <directory>${basedir}/src/main</directory>
+                <includes>
+                  <include>java/**/*.java</include>
+                  <include>proto/**/*.proto</include>
+                </includes>
+              </source>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
@@ -288,22 +332,6 @@
               </target>
             </configuration>
           </execution>
-          <execution>
-            <id>save-version</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-                <mkdir dir="${project.build.directory}/generated-sources/java"/>
-                <exec executable="sh">
-                  <arg
-                      line="${basedir}/dev-support/saveVersion.sh ${project.version} ${project.build.directory}/generated-sources/java"/>
-                </exec>
-              </target>
-            </configuration>
-          </execution>
           <execution>
             <id>generate-test-sources</id>
             <phase>generate-test-sources</phase>
@@ -445,13 +473,26 @@
             <exclude>dev-support/jdiff/**</exclude>
             <exclude>src/main/native/*</exclude>
             <exclude>src/main/native/config/*</exclude>
-            <exclude>src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo</exclude>
             <exclude>src/main/native/m4/*</exclude>
             <exclude>src/test/empty-file</exclude>
             <exclude>src/test/all-tests</exclude>
+            <exclude>src/test/resources/kdc/ldif/users.ldif</exclude>
+            <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c</exclude>
           </excludes>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
+        </configuration>
+      </plugin>
     </plugins>
   </build>

@@ -513,6 +554,9 @@
                     <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
                       <arg line="VERBOSE=1"/>
                     </exec>
+                    <!-- The second make is a workaround for HADOOP-9215.  It can
+                         be removed when version 2.6 of cmake is no longer supported . -->
+                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true"></exec>
                   </target>
                 </configuration>
               </execution>

+ 17 - 0
hadoop-common-project/hadoop-common/src/config.h.cmake

@@ -1,3 +1,20 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 #ifndef CONFIG_H
 #define CONFIG_H


+ 4 - 4
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -58,9 +58,9 @@ case $COMMAND in
     #try to locate hdfs and if present, delegate to it.
     shift
     if [ -f "${HADOOP_HDFS_HOME}"/bin/hdfs ]; then
-      exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups}  $*
+      exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups}  "$@"
     elif [ -f "${HADOOP_PREFIX}"/bin/hdfs ]; then
-      exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} $*
+      exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@"
     else
       echo "HADOOP_HDFS_HOME not found!"
       exit 1
@@ -75,9 +75,9 @@ case $COMMAND in
     #try to locate mapred and if present, delegate to it.
     shift
     if [ -f "${HADOOP_MAPRED_HOME}"/bin/mapred ]; then
-      exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} $*
+      exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
     elif [ -f "${HADOOP_PREFIX}"/bin/mapred ]; then
-      exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} $*
+      exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
     else
       echo "HADOOP_MAPRED_HOME not found!"
       exit 1

+ 219 - 0
hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml

@@ -0,0 +1,219 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+
+ Copyright 2011 The Apache Software Foundation
+ 
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+ <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value>${HADOOP_HDFS_USER}</value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <value>${HADOOP_HDFS_USER}</value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value>${HADOOP_HDFS_USER}</value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.ha.service.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HAService protocol used by HAAdmin to manage the
+      active and stand-by states of namenode.</description>
+  </property>
+
+  <property>
+    <name>security.zkfc.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for access to the ZK Failover Controller
+    </description>
+  </property>
+
+  <property>
+    <name>security.qjournal.service.protocol.acl</name>
+    <value>${HADOOP_HDFS_USER}</value>
+    <description>ACL for QJournalProtocol, used by the NN to communicate with
+    JNs when using the QuorumJournalManager for edit logs.</description>
+  </property>
+
+  <property>
+    <name>security.mrhs.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HSClientProtocol, used by job clients to
+    communciate with the MR History Server job status etc. 
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <!-- YARN Protocols -->
+
+  <property>
+    <name>security.resourcetracker.protocol.acl</name>
+    <value>${HADOOP_YARN_USER}</value>
+    <description>ACL for ResourceTracker protocol, used by the
+    ResourceManager and NodeManager to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.admin.protocol.acl</name>
+    <value>${HADOOP_YARN_USER}</value>
+    <description>ACL for RMAdminProtocol, for admin commands. 
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.resourcemanager.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientRMProtocol, used by the ResourceManager 
+    and applications submission clients to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationmaster.resourcemanager.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for AMRMProtocol, used by the ResourceManager 
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.containermanager.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ContainerManager protocol, used by the NodeManager 
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.resourcelocalizer.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceLocalizer protocol, used by the NodeManager 
+    and ResourceLocalizer to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.task.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for MRClientProtocol, used by job clients to
+    communciate with the MR ApplicationMaster to query job status etc. 
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+</configuration>

+ 0 - 16
hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/service_level_auth.xml

@@ -116,22 +116,6 @@
             <td>ACL for NamenodeProtocol, the protocol used by the secondary
             namenode to communicate with the namenode.</td>
           </tr>
-          <tr>
-            <td><code>security.inter.tracker.protocol.acl</code></td>
-            <td>ACL for InterTrackerProtocol, used by the tasktrackers to 
-            communicate with the jobtracker.</td>
-          </tr>
-          <tr>
-            <td><code>security.job.submission.protocol.acl</code></td>
-            <td>ACL for JobSubmissionProtocol, used by job clients to 
-            communciate with the jobtracker for job submission, querying job status 
-            etc.</td>
-          </tr>
-          <tr>
-            <td><code>security.task.umbilical.protocol.acl</code></td>
-            <td>ACL for TaskUmbilicalProtocol, used by the map and reduce 
-            tasks to communicate with the parent tasktracker.</td>
-          </tr>
           <tr>
             <td><code>security.refresh.policy.protocol.acl</code></td>
             <td>ACL for RefreshAuthorizationPolicyProtocol, used by the

+ 0 - 74
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/HadoopVersionAnnotation.java

@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop;
-
-import java.lang.annotation.*;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * A package attribute that captures the version of Hadoop that was compiled.
- */
-@Retention(RetentionPolicy.RUNTIME)
-@Target(ElementType.PACKAGE)
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-@InterfaceStability.Unstable
-public @interface HadoopVersionAnnotation {
- 
-  /**
-   * Get the Hadoop version
-   * @return the version string "0.6.3-dev"
-   */
-  String version();
-  
-  /**
-   * Get the username that compiled Hadoop.
-   */
-  String user();
-  
-  /**
-   * Get the date when Hadoop was compiled.
-   * @return the date in unix 'date' format
-   */
-  String date();
-    
-  /**
-   * Get the url for the subversion repository.
-   */
-  String url();
-  
-  /**
-   * Get the subversion revision.
-   * @return the revision number as a string (eg. "451451")
-   */
-  String revision();
-
-  /**
-   * Get the branch from which this was compiled.
-   * @return The branch name, e.g. "trunk" or "branches/branch-0.20"
-   */
-  String branch();
-
-  /**
-   * Get a checksum of the source files from which
-   * Hadoop was compiled.
-   * @return a string that uniquely identifies the source
-   **/
-  String srcChecksum();    
-}

+ 14 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.http.lib.StaticUserWebFilter;
+import org.apache.hadoop.security.authorize.Service;

 /**
  * This class contains constants for configuration keys used
@@ -114,7 +115,18 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
   public static final String
   SECURITY_ZKFC_PROTOCOL_ACL = "security.zkfc.protocol.acl";
-  
+  public static final String
+  SECURITY_CLIENT_PROTOCOL_ACL = "security.client.protocol.acl";
+  public static final String SECURITY_CLIENT_DATANODE_PROTOCOL_ACL =
+      "security.client.datanode.protocol.acl";
+  public static final String
+  SECURITY_DATANODE_PROTOCOL_ACL = "security.datanode.protocol.acl";
+  public static final String
+  SECURITY_INTER_DATANODE_PROTOCOL_ACL = "security.inter.datanode.protocol.acl";
+  public static final String
+  SECURITY_NAMENODE_PROTOCOL_ACL = "security.namenode.protocol.acl";
+  public static final String SECURITY_QJOURNAL_SERVICE_PROTOCOL_ACL =
+      "security.qjournal.service.protocol.acl";
   public static final String HADOOP_SECURITY_TOKEN_SERVICE_USE_IP =
       "hadoop.security.token.service.use_ip";
   public static final boolean HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT =
@@ -191,4 +203,4 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final long HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT =
     4*60*60; // 4 hours

-}
+}

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -173,6 +173,11 @@ public class CommonConfigurationKeysPublic {
   /** Default value for IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY */
   public static final int     IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT = 10000; // 10s
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String  IPC_CLIENT_CONNECT_TIMEOUT_KEY =
+    "ipc.client.connect.timeout";
+  /** Default value for IPC_CLIENT_CONNECT_TIMEOUT_KEY */
+  public static final int     IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT = 20000; // 20s
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  IPC_CLIENT_CONNECT_MAX_RETRIES_KEY =
     "ipc.client.connect.max.retries";
   /** Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_KEY */

+ 76 - 11
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java

@@ -87,33 +87,98 @@ public class FileUtil {
    * (4) If dir is a normal directory, then dir and all its contents recursively
    *     are deleted.
    */
-  public static boolean fullyDelete(File dir) {
-    if (dir.delete()) {
+  public static boolean fullyDelete(final File dir) {
+    return fullyDelete(dir, false);
+  }
+  
+  /**
+   * Delete a directory and all its contents.  If
+   * we return false, the directory may be partially-deleted.
+   * (1) If dir is symlink to a file, the symlink is deleted. The file pointed
+   *     to by the symlink is not deleted.
+   * (2) If dir is symlink to a directory, symlink is deleted. The directory
+   *     pointed to by symlink is not deleted.
+   * (3) If dir is a normal file, it is deleted.
+   * (4) If dir is a normal directory, then dir and all its contents recursively
+   *     are deleted.
+   * @param dir the file or directory to be deleted
+   * @param tryGrantPermissions true if permissions should be modified to delete a file.
+   * @return true on success false on failure.
+   */
+  public static boolean fullyDelete(final File dir, boolean tryGrantPermissions) {
+    if (tryGrantPermissions) {
+      // try to chmod +rwx the parent folder of the 'dir': 
+      File parent = dir.getParentFile();
+      grantPermissions(parent);
+    }
+    if (deleteImpl(dir, false)) {
       // dir is (a) normal file, (b) symlink to a file, (c) empty directory or
       // (d) symlink to a directory
       return true;
     }
-
     // handle nonempty directory deletion
-    if (!fullyDeleteContents(dir)) {
+    if (!fullyDeleteContents(dir, tryGrantPermissions)) {
       return false;
     }
-    return dir.delete();
+    return deleteImpl(dir, true);
+  }
+  
+  /*
+   * Pure-Java implementation of "chmod +rwx f".
+   */
+  private static void grantPermissions(final File f) {
+      f.setExecutable(true);
+      f.setReadable(true);
+      f.setWritable(true);
   }

+  private static boolean deleteImpl(final File f, final boolean doLog) {
+    if (f == null) {
+      LOG.warn("null file argument.");
+      return false;
+    }
+    final boolean wasDeleted = f.delete();
+    if (wasDeleted) {
+      return true;
+    }
+    final boolean ex = f.exists();
+    if (doLog && ex) {
+      LOG.warn("Failed to delete file or dir ["
+          + f.getAbsolutePath() + "]: it still exists.");
+    }
+    return !ex;
+  }
+  
   /**
    * Delete the contents of a directory, not the directory itself.  If
    * we return false, the directory may be partially-deleted.
    * If dir is a symlink to a directory, all the contents of the actual
    * directory pointed to by dir will be deleted.
    */
-  public static boolean fullyDeleteContents(File dir) {
+  public static boolean fullyDeleteContents(final File dir) {
+    return fullyDeleteContents(dir, false);
+  }
+  
+  /**
+   * Delete the contents of a directory, not the directory itself.  If
+   * we return false, the directory may be partially-deleted.
+   * If dir is a symlink to a directory, all the contents of the actual
+   * directory pointed to by dir will be deleted.
+   * @param tryGrantPermissions if 'true', try grant +rwx permissions to this 
+   * and all the underlying directories before trying to delete their contents.
+   */
+  public static boolean fullyDeleteContents(final File dir, final boolean tryGrantPermissions) {
+    if (tryGrantPermissions) {
+      // to be able to list the dir and delete files from it
+      // we must grant the dir rwx permissions: 
+      grantPermissions(dir);
+    }
     boolean deletionSucceeded = true;
-    File contents[] = dir.listFiles();
+    final File[] contents = dir.listFiles();
     if (contents != null) {
       for (int i = 0; i < contents.length; i++) {
         if (contents[i].isFile()) {
-          if (!contents[i].delete()) {// normal file or symlink to another file
+          if (!deleteImpl(contents[i], true)) {// normal file or symlink to another file
             deletionSucceeded = false;
             continue; // continue deletion of other files/dirs under dir
           }
@@ -121,16 +186,16 @@ public class FileUtil {
           // Either directory or symlink to another directory.
           // Try deleting the directory as this might be a symlink
           boolean b = false;
-          b = contents[i].delete();
+          b = deleteImpl(contents[i], false);
           if (b){
             //this was indeed a symlink or an empty directory
             continue;
           }
           // if not an empty directory or symlink let
           // fullydelete handle it.
-          if (!fullyDelete(contents[i])) {
+          if (!fullyDelete(contents[i], tryGrantPermissions)) {
             deletionSucceeded = false;
-            continue; // continue deletion of other files/dirs under dir
+            // continue deletion of other files/dirs under dir
           }
         }
       }

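Not part of the commit: a minimal usage sketch of the new fullyDelete(File, boolean) overload shown above (HADOOP-8849), assuming an illustrative scratch-directory path. With tryGrantPermissions set to true, the parent and each traversed directory are first granted +rwx so that previously unreadable trees can still be removed.

    import java.io.File;
    import org.apache.hadoop.fs.FileUtil;

    public class FullyDeleteSketch {
      public static void main(String[] args) {
        // Hypothetical scratch directory whose permissions may have been stripped.
        File dir = new File("/tmp/hadoop-scratch");
        // true => chmod +rwx the parent and each subdirectory before deleting,
        // as implemented by grantPermissions()/deleteImpl() in the diff above.
        boolean deleted = FileUtil.fullyDelete(dir, true);
        System.out.println("deleted: " + deleted);
      }
    }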
+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -305,6 +305,7 @@ public class HttpServer implements FilterContainer {
     ret.setAcceptQueueSize(128);
     ret.setResolveNames(false);
     ret.setUseDirectBuffers(false);
+    ret.setHeaderBufferSize(1024*64);
     return ret;
   }


+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java

@@ -122,7 +122,7 @@ public class CompressionCodecFactory {
     if (codecsString != null) {
       StringTokenizer codecSplit = new StringTokenizer(codecsString, ",");
       while (codecSplit.hasMoreElements()) {
-        String codecSubstring = codecSplit.nextToken();
+        String codecSubstring = codecSplit.nextToken().trim();
         if (codecSubstring.length() != 0) {
           try {
             Class<?> cls = conf.getClassByName(codecSubstring);

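Not part of the commit: a small sketch of what the trim() above changes (HADOOP-9216). Codec class names in io.compression.codecs separated by a comma plus whitespace now resolve instead of failing the class lookup; the codec list below is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.CompressionCodecFactory;

    public class CodecListSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // The space after the comma is now tolerated because each token is trimmed.
        conf.set("io.compression.codecs",
            "org.apache.hadoop.io.compress.DefaultCodec, org.apache.hadoop.io.compress.GzipCodec");
        CompressionCodecFactory factory = new CompressionCodecFactory(conf);
        System.out.println(factory);
      }
    }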
+ 15 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -106,6 +106,8 @@ public class Client {

   private SocketFactory socketFactory;           // how to create sockets
   private int refCount = 1;
+
+  private final int connectionTimeout;

   final static int PING_CALL_ID = -1;

@@ -159,7 +161,16 @@ public class Client {
     }
     return -1;
   }
-  
+  /**
+   * set the connection timeout value in configuration
+   * 
+   * @param conf Configuration
+   * @param timeout the socket connect timeout value
+   */
+  public static final void setConnectTimeout(Configuration conf, int timeout) {
+    conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY, timeout);
+  }
+
   /**
    * Increment this client's reference count
    *
@@ -494,8 +505,7 @@ public class Client {
             }
           }

-          // connection time out is 20s
-          NetUtils.connect(this.socket, server, 20000);
+          NetUtils.connect(this.socket, server, connectionTimeout);
           if (rpcTimeout > 0) {
             pingInterval = rpcTimeout;  // rpcTimeout overwrites pingInterval
           }
@@ -1034,6 +1044,8 @@ public class Client {
     this.valueClass = valueClass;
     this.conf = conf;
     this.socketFactory = factory;
+    this.connectionTimeout = conf.getInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY,
+        CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT);
   }

   /**

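Not part of the commit: a sketch of the configurable IPC connect timeout introduced above (HADOOP-9106). The 5-second value is illustrative; Client instances built from this Configuration use it in place of the former hard-coded 20 s.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.Client;

    public class ConnectTimeoutSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Equivalent to conf.setInt("ipc.client.connect.timeout", 5000).
        Client.setConnectTimeout(conf, 5000);
        System.out.println(conf.get("ipc.client.connect.timeout"));
      }
    }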
+ 20 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java

@@ -25,6 +25,7 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.NetworkInterface;
 import java.net.NoRouteToHostException;
+import java.net.ServerSocket;
 import java.net.Socket;
 import java.net.SocketAddress;
 import java.net.SocketException;
@@ -865,4 +866,23 @@ public class NetUtils {
     }
     return addrs;
   }
+
+  /**
+   * Return a free port number. There is no guarantee it will remain free, so
+   * it should be used immediately.
+   *
+   * @returns A free port for binding a local socket
+   */
+  public static int getFreeSocketPort() {
+    int port = 0;
+    try {
+      ServerSocket s = new ServerSocket(0);
+      port = s.getLocalPort();
+      s.close();
+      return port;
+    } catch (IOException e) {
+      // Could not get a free port. Return default port 0.
+    }
+    return port;
+  }
 }
 }
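
The returned port can be taken by another process as soon as the probe socket closes, so callers should bind immediately. A minimal sketch (uses java.net.ServerSocket, inside a method that may throw IOException):

    // Sketch: grab an ephemeral port and bind to it right away.
    int port = NetUtils.getFreeSocketPort();
    if (port == 0) {
      throw new IOException("Could not find a free port");  // 0 is the "nothing found" fallback above
    }
    ServerSocket ss = new ServerSocket(port);               // binding promptly narrows the race window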

+ 27 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java

@@ -18,10 +18,13 @@
 
 
 package org.apache.hadoop.security;
 package org.apache.hadoop.security;
 
 
+import java.io.BufferedInputStream;
 import java.io.DataInput;
 import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collection;
@@ -148,8 +151,32 @@ public class Credentials implements Writable {
       in.close();
       in.close();
       return credentials;
       return credentials;
     } catch(IOException ioe) {
     } catch(IOException ioe) {
+      throw new IOException("Exception reading " + filename, ioe);
+    } finally {
       IOUtils.cleanup(LOG, in);
       IOUtils.cleanup(LOG, in);
+    }
+  }
+
+  /**
+   * Convenience method for reading a token storage file and returning the
+   * credentials it contains
+   * @param filename
+   * @param conf
+   * @throws IOException
+   */
+  public static Credentials readTokenStorageFile(File filename, Configuration conf)
+      throws IOException {
+    DataInputStream in = null;
+    Credentials credentials = new Credentials();
+    try {
+      in = new DataInputStream(new BufferedInputStream(
+          new FileInputStream(filename)));
+      credentials.readTokenStorageStream(in);
+      return credentials;
+    } catch(IOException ioe) {
       throw new IOException("Exception reading " + filename, ioe);
       throw new IOException("Exception reading " + filename, ioe);
+    } finally {
+      IOUtils.cleanup(LOG, in);
     }
     }
   }
   }
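
The new File-based overload reads the token file directly from the local filesystem, which is what the UserGroupInformation change below relies on to avoid the FileSystem lock cycle (HADOOP-9212). A minimal usage sketch; the file path is illustrative and conf is an existing Configuration:

    // Sketch: load delegation tokens from a local token storage file into the current user.
    File tokenFile = new File("/tmp/hadoop-tokens.bin");               // hypothetical location
    Credentials cred = Credentials.readTokenStorageFile(tokenFile, conf);
    UserGroupInformation.getCurrentUser().addCredentials(cred);        // same pattern as the UGI hunk below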
   
   

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.security;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
 
 
+import java.io.File;
 import java.io.IOException;
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.security.AccessControlContext;
 import java.security.AccessControlContext;
@@ -656,10 +657,11 @@ public class UserGroupInformation {
 
 
         String fileLocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
         String fileLocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
         if (fileLocation != null) {
         if (fileLocation != null) {
-          // load the token storage file and put all of the tokens into the
-          // user.
+          // Load the token storage file and put all of the tokens into the
+          // user. Don't use the FileSystem API for reading since it has a lock
+          // cycle (HADOOP-9212).
           Credentials cred = Credentials.readTokenStorageFile(
           Credentials cred = Credentials.readTokenStorageFile(
-              new Path("file:///" + fileLocation), conf);
+              new File(fileLocation), conf);
           loginUser.addCredentials(cred);
           loginUser.addCredentials(cred);
         }
         }
         loginUser.spawnAutoRenewalThreadForUserCreds();
         loginUser.spawnAutoRenewalThreadForUserCreds();

+ 62 - 28
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java

@@ -20,41 +20,78 @@ package org.apache.hadoop.util;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.HadoopVersionAnnotation;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+
 /**
 /**
- * This class finds the package info for Hadoop and the HadoopVersionAnnotation
- * information.
+ * This class returns build information about Hadoop components.
  */
  */
 @InterfaceAudience.Private
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 @InterfaceStability.Unstable
 public class VersionInfo {
 public class VersionInfo {
   private static final Log LOG = LogFactory.getLog(VersionInfo.class);
   private static final Log LOG = LogFactory.getLog(VersionInfo.class);
 
 
-  private static Package myPackage;
-  private static HadoopVersionAnnotation version;
-  
-  static {
-    myPackage = HadoopVersionAnnotation.class.getPackage();
-    version = myPackage.getAnnotation(HadoopVersionAnnotation.class);
+  private Properties info;
+
+  protected VersionInfo(String component) {
+    info = new Properties();
+    String versionInfoFile = component + "-version-info.properties";
+    try {
+      InputStream is = Thread.currentThread().getContextClassLoader()
+        .getResourceAsStream(versionInfoFile);
+      info.load(is);
+    } catch (IOException ex) {
+      LogFactory.getLog(getClass()).warn("Could not read '" + 
+        versionInfoFile + "', " + ex.toString(), ex);
+    }
   }
   }
 
 
-  /**
-   * Get the meta-data for the Hadoop package.
-   * @return
-   */
-  static Package getPackage() {
-    return myPackage;
+  protected String _getVersion() {
+    return info.getProperty("version", "Unknown");
   }
   }
-  
+
+  protected String _getRevision() {
+    return info.getProperty("revision", "Unknown");
+  }
+
+  protected String _getBranch() {
+    return info.getProperty("branch", "Unknown");
+  }
+
+  protected String _getDate() {
+    return info.getProperty("date", "Unknown");
+  }
+
+  protected String _getUser() {
+    return info.getProperty("user", "Unknown");
+  }
+
+  protected String _getUrl() {
+    return info.getProperty("url", "Unknown");
+  }
+
+  protected String _getSrcChecksum() {
+    return info.getProperty("srcChecksum", "Unknown");
+  }
+
+  protected String _getBuildVersion(){
+    return getVersion() +
+      " from " + _getRevision() +
+      " by " + _getUser() +
+      " source checksum " + _getSrcChecksum();
+  }
+
+  private static VersionInfo COMMON_VERSION_INFO = new VersionInfo("common");
   /**
   /**
    * Get the Hadoop version.
    * Get the Hadoop version.
    * @return the Hadoop version string, eg. "0.6.3-dev"
    * @return the Hadoop version string, eg. "0.6.3-dev"
    */
    */
   public static String getVersion() {
   public static String getVersion() {
-    return version != null ? version.version() : "Unknown";
+    return COMMON_VERSION_INFO._getVersion();
   }
   }
   
   
   /**
   /**
@@ -62,7 +99,7 @@ public class VersionInfo {
    * @return the revision number, eg. "451451"
    * @return the revision number, eg. "451451"
    */
    */
   public static String getRevision() {
   public static String getRevision() {
-    return version != null ? version.revision() : "Unknown";
+    return COMMON_VERSION_INFO._getRevision();
   }
   }
 
 
   /**
   /**
@@ -70,7 +107,7 @@ public class VersionInfo {
    * @return The branch name, e.g. "trunk" or "branches/branch-0.20"
    * @return The branch name, e.g. "trunk" or "branches/branch-0.20"
    */
    */
   public static String getBranch() {
   public static String getBranch() {
-    return version != null ? version.branch() : "Unknown";
+    return COMMON_VERSION_INFO._getBranch();
   }
   }
 
 
   /**
   /**
@@ -78,7 +115,7 @@ public class VersionInfo {
    * @return the compilation date in unix date format
    * @return the compilation date in unix date format
    */
    */
   public static String getDate() {
   public static String getDate() {
-    return version != null ? version.date() : "Unknown";
+    return COMMON_VERSION_INFO._getDate();
   }
   }
   
   
   /**
   /**
@@ -86,14 +123,14 @@ public class VersionInfo {
    * @return the username of the user
    * @return the username of the user
    */
    */
   public static String getUser() {
   public static String getUser() {
-    return version != null ? version.user() : "Unknown";
+    return COMMON_VERSION_INFO._getUser();
   }
   }
   
   
   /**
   /**
    * Get the subversion URL for the root Hadoop directory.
    * Get the subversion URL for the root Hadoop directory.
    */
    */
   public static String getUrl() {
   public static String getUrl() {
-    return version != null ? version.url() : "Unknown";
+    return COMMON_VERSION_INFO._getUrl();
   }
   }
 
 
   /**
   /**
@@ -101,7 +138,7 @@ public class VersionInfo {
    * built.
    * built.
    **/
    **/
   public static String getSrcChecksum() {
   public static String getSrcChecksum() {
-    return version != null ? version.srcChecksum() : "Unknown";
+    return COMMON_VERSION_INFO._getSrcChecksum();
   }
   }
 
 
   /**
   /**
@@ -109,14 +146,11 @@ public class VersionInfo {
    * revision, user and date. 
    * revision, user and date. 
    */
    */
   public static String getBuildVersion(){
   public static String getBuildVersion(){
-    return VersionInfo.getVersion() + 
-    " from " + VersionInfo.getRevision() +
-    " by " + VersionInfo.getUser() + 
-    " source checksum " + VersionInfo.getSrcChecksum();
+    return COMMON_VERSION_INFO._getBuildVersion();
   }
   }
   
   
   public static void main(String[] args) {
   public static void main(String[] args) {
-    LOG.debug("version: "+ version);
+    LOG.debug("version: "+ getVersion());
     System.out.println("Hadoop " + getVersion());
     System.out.println("Hadoop " + getVersion());
     System.out.println("Subversion " + getUrl() + " -r " + getRevision());
     System.out.println("Subversion " + getUrl() + " -r " + getRevision());
     System.out.println("Compiled by " + getUser() + " on " + getDate());
     System.out.println("Compiled by " + getUser() + " on " + getDate());
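
With the annotation-based lookup removed, each component is expected to ship a <component>-version-info.properties file on its classpath (see common-version-info.properties below, whose ${...} placeholders are presumably substituted at build time) and expose it through a small VersionInfo subclass. A hedged sketch of such a subclass; the class and component names are illustrative, not part of this commit:

    // Illustrative only: a component-specific VersionInfo built on the protected constructor above.
    public class ExampleVersionInfo extends VersionInfo {
      private static final ExampleVersionInfo INFO = new ExampleVersionInfo();

      protected ExampleVersionInfo() {
        super("example");            // loads example-version-info.properties from the classpath
      }

      public static String getVersion() {
        return INFO._getVersion();   // delegates to the properties-backed accessor
      }
    }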

+ 13 - 0
hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo

@@ -1 +1,14 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
 org.apache.hadoop.security.AnnotatedSecurityInfo
 org.apache.hadoop.security.AnnotatedSecurityInfo

+ 25 - 0
hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties

@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+version=${pom.version}
+revision=${version-info.scm.commit}
+branch=${version-info.scm.branch}
+user=${user.name}
+date=${version-info.build.time}
+url=${version-info.scm.uri}
+srcChecksum=${version-info.source.md5}

+ 18 - 2
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -80,9 +80,17 @@
 
 
 <property>
 <property>
   <name>hadoop.security.group.mapping</name>
   <name>hadoop.security.group.mapping</name>
-  <value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
+  <value>org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback</value>
   <description>
   <description>
-    Class for user to group mapping (get groups for a given user) for ACL
+    Class for user to group mapping (get groups for a given user) for ACL. 
+    The default implementation,
+    org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback, 
+    will determine if the Java Native Interface (JNI) is available. If JNI is 
+    available the implementation will use the API within hadoop to resolve a 
+    list of groups for a user. If JNI is not available then the shell 
+    implementation, ShellBasedUnixGroupsMapping, is used.  This implementation 
+    shells out to the Linux/Unix environment with the 
+    <code>bash -c groups</code> command to resolve a list of groups for a user.
   </description>
   </description>
 </property>
 </property>
 
 
@@ -565,6 +573,14 @@
   </description>
   </description>
 </property>
 </property>
 
 
+<property>
+  <name>ipc.client.connect.timeout</name>
+  <value>20000</value>
+  <description>Indicates the number of milliseconds a client will wait for the 
+               socket to establish a server connection.
+  </description>
+</property>
+
 <property>
 <property>
   <name>ipc.client.connect.max.retries.on.timeouts</name>
   <name>ipc.client.connect.max.retries.on.timeouts</name>
   <value>45</value>
   <value>45</value>
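
Both new defaults can be overridden per deployment, either in core-site.xml or from client code. An illustrative override that reverts to the shell-based group mapper and shortens the connect timeout (values are examples only):

    // Sketch: overriding the two defaults introduced above via Configuration.
    Configuration conf = new Configuration();
    conf.set("hadoop.security.group.mapping",
        "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");  // fall back to the shell mapper
    conf.setInt("ipc.client.connect.timeout", 5000);                 // 5 s instead of the 20 s default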

+ 17 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java

@@ -1,3 +1,20 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 package org.apache.hadoop.fs;
 package org.apache.hadoop.fs;
 
 
 import java.io.FileNotFoundException;
 import java.io.FileNotFoundException;

+ 100 - 31
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java

@@ -17,6 +17,7 @@
  */
  */
 package org.apache.hadoop.fs;
 package org.apache.hadoop.fs;
 
 
+import org.junit.Before;
 import java.io.BufferedReader;
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.File;
 import java.io.FileReader;
 import java.io.FileReader;
@@ -173,12 +174,26 @@ public class TestFileUtil {
       //Expected an IOException
       //Expected an IOException
     }
     }
   }
   }
+
+  @Before
+  public void before() throws IOException {
+    cleanupImpl();
+  }
   
   
   @After
   @After
   public void tearDown() throws IOException {
   public void tearDown() throws IOException {
-    FileUtil.fullyDelete(del);
-    FileUtil.fullyDelete(tmp);
-    FileUtil.fullyDelete(partitioned);
+    cleanupImpl();
+  }
+  
+  private void cleanupImpl() throws IOException  {
+    FileUtil.fullyDelete(del, true);
+    Assert.assertTrue(!del.exists());
+    
+    FileUtil.fullyDelete(tmp, true);
+    Assert.assertTrue(!tmp.exists());
+    
+    FileUtil.fullyDelete(partitioned, true);
+    Assert.assertTrue(!partitioned.exists());
   }
   }
 
 
   @Test
   @Test
@@ -269,12 +284,14 @@ public class TestFileUtil {
     Assert.assertTrue(new File(tmp, FILE).exists());
     Assert.assertTrue(new File(tmp, FILE).exists());
   }
   }
 
 
-  private File xSubDir = new File(del, "xsubdir");
-  private File ySubDir = new File(del, "ysubdir");
-  static String file1Name = "file1";
-  private File file2 = new File(xSubDir, "file2");
-  private File file3 = new File(ySubDir, "file3");
-  private File zlink = new File(del, "zlink");
+  private final File xSubDir = new File(del, "xSubDir");
+  private final File xSubSubDir = new File(xSubDir, "xSubSubDir");
+  private final File ySubDir = new File(del, "ySubDir");
+  private static final String file1Name = "file1";
+  private final File file2 = new File(xSubDir, "file2");
+  private final File file22 = new File(xSubSubDir, "file22");
+  private final File file3 = new File(ySubDir, "file3");
+  private final File zlink = new File(del, "zlink");
   
   
   /**
   /**
    * Creates a directory which can not be deleted completely.
    * Creates a directory which can not be deleted completely.
@@ -286,10 +303,14 @@ public class TestFileUtil {
    *                       |
    *                       |
    *    .---------------------------------------,
    *    .---------------------------------------,
    *    |            |              |           |
    *    |            |              |           |
-   *  file1(!w)   xsubdir(-w)   ysubdir(+w)   zlink
-   *                 |              |
-   *               file2          file3
-   *
+   *  file1(!w)   xSubDir(-rwx)   ySubDir(+w)   zlink
+   *              |  |              |
+   *              | file2(-rwx)   file3
+   *              |
+   *            xSubSubDir(-rwx) 
+   *              |
+   *             file22(-rwx)
+   *             
    * @throws IOException
    * @throws IOException
    */
    */
   private void setupDirsAndNonWritablePermissions() throws IOException {
   private void setupDirsAndNonWritablePermissions() throws IOException {
@@ -302,7 +323,16 @@ public class TestFileUtil {
 
 
     xSubDir.mkdirs();
     xSubDir.mkdirs();
     file2.createNewFile();
     file2.createNewFile();
-    xSubDir.setWritable(false);
+    
+    xSubSubDir.mkdirs();
+    file22.createNewFile();
+    
+    revokePermissions(file22);
+    revokePermissions(xSubSubDir);
+    
+    revokePermissions(file2);
+    revokePermissions(xSubDir);
+    
     ySubDir.mkdirs();
     ySubDir.mkdirs();
     file3.createNewFile();
     file3.createNewFile();
 
 
@@ -314,23 +344,43 @@ public class TestFileUtil {
     FileUtil.symLink(tmpFile.toString(), zlink.toString());
     FileUtil.symLink(tmpFile.toString(), zlink.toString());
   }
   }
   
   
+  private static void grantPermissions(final File f) {
+    f.setReadable(true);
+    f.setWritable(true);
+    f.setExecutable(true);
+  }
+  
+  private static void revokePermissions(final File f) {
+     f.setWritable(false);
+     f.setExecutable(false);
+     f.setReadable(false);
+  }
+  
   // Validates the return value.
   // Validates the return value.
-  // Validates the existence of directory "xsubdir" and the file "file1"
-  // Sets writable permissions for the non-deleted dir "xsubdir" so that it can
-  // be deleted in tearDown().
-  private void validateAndSetWritablePermissions(boolean ret) {
-    xSubDir.setWritable(true);
-    Assert.assertFalse("The return value should have been false!", ret);
-    Assert.assertTrue("The file file1 should not have been deleted!",
+  // Validates the existence of the file "file1"
+  private void validateAndSetWritablePermissions(
+      final boolean expectedRevokedPermissionDirsExist, final boolean ret) {
+    grantPermissions(xSubDir);
+    grantPermissions(xSubSubDir);
+    
+    Assert.assertFalse("The return value should have been false.", ret);
+    Assert.assertTrue("The file file1 should not have been deleted.",
         new File(del, file1Name).exists());
         new File(del, file1Name).exists());
-    Assert.assertTrue(
-        "The directory xsubdir should not have been deleted!",
-        xSubDir.exists());
-    Assert.assertTrue("The file file2 should not have been deleted!",
-        file2.exists());
-    Assert.assertFalse("The directory ysubdir should have been deleted!",
+    
+    Assert.assertEquals(
+        "The directory xSubDir *should* not have been deleted.",
+        expectedRevokedPermissionDirsExist, xSubDir.exists());
+    Assert.assertEquals("The file file2 *should* not have been deleted.",
+        expectedRevokedPermissionDirsExist, file2.exists());
+    Assert.assertEquals(
+        "The directory xSubSubDir *should* not have been deleted.",
+        expectedRevokedPermissionDirsExist, xSubSubDir.exists());
+    Assert.assertEquals("The file file22 *should* not have been deleted.",
+        expectedRevokedPermissionDirsExist, file22.exists());
+    
+    Assert.assertFalse("The directory ySubDir should have been deleted.",
         ySubDir.exists());
         ySubDir.exists());
-    Assert.assertFalse("The link zlink should have been deleted!",
+    Assert.assertFalse("The link zlink should have been deleted.",
         zlink.exists());
         zlink.exists());
   }
   }
 
 
@@ -339,7 +389,15 @@ public class TestFileUtil {
     LOG.info("Running test to verify failure of fullyDelete()");
     LOG.info("Running test to verify failure of fullyDelete()");
     setupDirsAndNonWritablePermissions();
     setupDirsAndNonWritablePermissions();
     boolean ret = FileUtil.fullyDelete(new MyFile(del));
     boolean ret = FileUtil.fullyDelete(new MyFile(del));
-    validateAndSetWritablePermissions(ret);
+    validateAndSetWritablePermissions(true, ret);
+  }
+  
+  @Test
+  public void testFailFullyDeleteGrantPermissions() throws IOException {
+    setupDirsAndNonWritablePermissions();
+    boolean ret = FileUtil.fullyDelete(new MyFile(del), true);
+    // this time the directories with revoked permissions *should* be deleted:
+    validateAndSetWritablePermissions(false, ret);
   }
   }
 
 
   /**
   /**
@@ -388,7 +446,10 @@ public class TestFileUtil {
      */
      */
     @Override
     @Override
     public File[] listFiles() {
     public File[] listFiles() {
-      File[] files = super.listFiles();
+      final File[] files = super.listFiles();
+      if (files == null) {
+         return null;
+      }
       List<File> filesList = Arrays.asList(files);
       List<File> filesList = Arrays.asList(files);
       Collections.sort(filesList);
       Collections.sort(filesList);
       File[] myFiles = new MyFile[files.length];
       File[] myFiles = new MyFile[files.length];
@@ -405,9 +466,17 @@ public class TestFileUtil {
     LOG.info("Running test to verify failure of fullyDeleteContents()");
     LOG.info("Running test to verify failure of fullyDeleteContents()");
     setupDirsAndNonWritablePermissions();
     setupDirsAndNonWritablePermissions();
     boolean ret = FileUtil.fullyDeleteContents(new MyFile(del));
     boolean ret = FileUtil.fullyDeleteContents(new MyFile(del));
-    validateAndSetWritablePermissions(ret);
+    validateAndSetWritablePermissions(true, ret);
   }
   }
 
 
+  @Test
+  public void testFailFullyDeleteContentsGrantPermissions() throws IOException {
+    setupDirsAndNonWritablePermissions();
+    boolean ret = FileUtil.fullyDeleteContents(new MyFile(del), true);
+    // this time the directories with revoked permissions *should* be deleted:
+    validateAndSetWritablePermissions(false, ret);
+  }
+  
   @Test
   @Test
   public void testCopyMergeSingleDirectory() throws IOException {
   public void testCopyMergeSingleDirectory() throws IOException {
     setupDirs();
     setupDirs();
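
These tests exercise the new boolean argument to FileUtil.fullyDelete/fullyDeleteContents which, when true, tries to grant missing permissions before deleting. A minimal sketch of the intended call pattern (the directory path is hypothetical):

    // Sketch: force-delete a directory tree even if some entries had their rwx bits revoked.
    File dir = new File("/tmp/scratch-dir");
    boolean deleted = FileUtil.fullyDelete(dir, true);    // true => try to grant permissions first
    if (!deleted) {
      LOG.warn("Could not fully delete " + dir);          // assumes a commons-logging LOG as in the tests
    }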

+ 29 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -119,6 +119,18 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     }    
     }    
   }
   }
 
 
+  @SuppressWarnings("serial")
+  public static class LongHeaderServlet extends HttpServlet {
+    @SuppressWarnings("unchecked")
+    @Override
+    public void doGet(HttpServletRequest request,
+                      HttpServletResponse response
+    ) throws ServletException, IOException {
+      Assert.assertEquals(63 * 1024, request.getHeader("longheader").length());
+      response.setStatus(HttpServletResponse.SC_OK);
+    }
+  }
+
   @SuppressWarnings("serial")
   @SuppressWarnings("serial")
   public static class HtmlContentServlet extends HttpServlet {
   public static class HtmlContentServlet extends HttpServlet {
     @Override
     @Override
@@ -139,6 +151,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     server.addServlet("echo", "/echo", EchoServlet.class);
     server.addServlet("echo", "/echo", EchoServlet.class);
     server.addServlet("echomap", "/echomap", EchoMapServlet.class);
     server.addServlet("echomap", "/echomap", EchoMapServlet.class);
     server.addServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class);
     server.addServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class);
+    server.addServlet("longheader", "/longheader", LongHeaderServlet.class);
     server.addJerseyResourcePackage(
     server.addJerseyResourcePackage(
         JerseyResource.class.getPackage().getName(), "/jersey/*");
         JerseyResource.class.getPackage().getName(), "/jersey/*");
     server.start();
     server.start();
@@ -197,6 +210,22 @@ public class TestHttpServer extends HttpServerFunctionalTest {
                  readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>")));
                  readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>")));
   }
   }
 
 
+  /** 
+   *  Test that verifies headers can be up to 64K long. 
+   *  The test adds a 63K header leaving 1K for other headers.
+   *  This is because the header buffer setting is for ALL headers,
+   *  names and values included. */
+  @Test public void testLongHeader() throws Exception {
+    URL url = new URL(baseUrl, "/longheader");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0 ; i < 63 * 1024; i++) {
+      sb.append("a");
+    }
+    conn.setRequestProperty("longheader", sb.toString());
+    assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+  }
+
   @Test public void testContentTypes() throws Exception {
   @Test public void testContentTypes() throws Exception {
     // Static CSS files should have text/css
     // Static CSS files should have text/css
     URL cssUrl = new URL(baseUrl, "/static/test.css");
     URL cssUrl = new URL(baseUrl, "/static/test.css");

+ 12 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java

@@ -256,5 +256,17 @@ public class TestCodecFactory extends TestCase {
     checkCodec("overridden factory for .gz", NewGzipCodec.class, codec);
     checkCodec("overridden factory for .gz", NewGzipCodec.class, codec);
     codec = factory.getCodecByClassName(NewGzipCodec.class.getCanonicalName());
     codec = factory.getCodecByClassName(NewGzipCodec.class.getCanonicalName());
     checkCodec("overridden factory for gzip codec", NewGzipCodec.class, codec);
     checkCodec("overridden factory for gzip codec", NewGzipCodec.class, codec);
+    
+    Configuration conf = new Configuration();
+    conf.set("io.compression.codecs", 
+        "   org.apache.hadoop.io.compress.GzipCodec   , " +
+        "    org.apache.hadoop.io.compress.DefaultCodec  , " +
+        " org.apache.hadoop.io.compress.BZip2Codec   ");
+    try {
+      CompressionCodecFactory.getCodecClasses(conf);
+    } catch (IllegalArgumentException e) {
+      fail("IllegalArgumentException is unexpected");
+    }
+
   }
   }
 }
 }

+ 15 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java

@@ -67,7 +67,7 @@ public class RPCCallBenchmark implements Tool, Configurable {
     private int serverReaderThreads = 1;
     private int serverReaderThreads = 1;
     private int clientThreads = 0;
     private int clientThreads = 0;
     private String host = "0.0.0.0";
     private String host = "0.0.0.0";
-    private int port = 12345;
+    private int port = 0;
     public int secondsToRun = 15;
     public int secondsToRun = 15;
     private int msgSize = 1024;
     private int msgSize = 1024;
     public Class<? extends RpcEngine> rpcEngine =
     public Class<? extends RpcEngine> rpcEngine =
@@ -201,11 +201,21 @@ public class RPCCallBenchmark implements Tool, Configurable {
       }
       }
     }
     }
     
     
+    public int getPort() {
+      if (port == 0) {
+        port = NetUtils.getFreeSocketPort();
+        if (port == 0) {
+          throw new RuntimeException("Could not find a free port");
+        }
+      }
+      return port;
+    }
+
     @Override
     @Override
     public String toString() {
     public String toString() {
       return "rpcEngine=" + rpcEngine + "\nserverThreads=" + serverThreads
       return "rpcEngine=" + rpcEngine + "\nserverThreads=" + serverThreads
           + "\nserverReaderThreads=" + serverReaderThreads + "\nclientThreads="
           + "\nserverReaderThreads=" + serverReaderThreads + "\nclientThreads="
-          + clientThreads + "\nhost=" + host + "\nport=" + port
+          + clientThreads + "\nhost=" + host + "\nport=" + getPort()
           + "\nsecondsToRun=" + secondsToRun + "\nmsgSize=" + msgSize;
           + "\nsecondsToRun=" + secondsToRun + "\nmsgSize=" + msgSize;
     }
     }
   }
   }
@@ -228,12 +238,12 @@ public class RPCCallBenchmark implements Tool, Configurable {
           .newReflectiveBlockingService(serverImpl);
           .newReflectiveBlockingService(serverImpl);
 
 
       server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
       server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
-          .setInstance(service).setBindAddress(opts.host).setPort(opts.port)
+          .setInstance(service).setBindAddress(opts.host).setPort(opts.getPort())
           .setNumHandlers(opts.serverThreads).setVerbose(false).build();
           .setNumHandlers(opts.serverThreads).setVerbose(false).build();
     } else if (opts.rpcEngine == WritableRpcEngine.class) {
     } else if (opts.rpcEngine == WritableRpcEngine.class) {
       server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
       server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
           .setInstance(new TestRPC.TestImpl()).setBindAddress(opts.host)
           .setInstance(new TestRPC.TestImpl()).setBindAddress(opts.host)
-          .setPort(opts.port).setNumHandlers(opts.serverThreads)
+          .setPort(opts.getPort()).setNumHandlers(opts.serverThreads)
           .setVerbose(false).build();
           .setVerbose(false).build();
     } else {
     } else {
       throw new RuntimeException("Bad engine: " + opts.rpcEngine);
       throw new RuntimeException("Bad engine: " + opts.rpcEngine);
@@ -378,7 +388,7 @@ public class RPCCallBenchmark implements Tool, Configurable {
    * Create a client proxy for the specified engine.
    * Create a client proxy for the specified engine.
    */
    */
   private RpcServiceWrapper createRpcClient(MyOptions opts) throws IOException {
   private RpcServiceWrapper createRpcClient(MyOptions opts) throws IOException {
-    InetSocketAddress addr = NetUtils.createSocketAddr(opts.host, opts.port);
+    InetSocketAddress addr = NetUtils.createSocketAddr(opts.host, opts.getPort());
     
     
     if (opts.rpcEngine == ProtobufRpcEngine.class) {
     if (opts.rpcEngine == ProtobufRpcEngine.class) {
       final TestRpcService proxy = RPC.getProxy(TestRpcService.class, 0, addr, conf);
       final TestRpcService proxy = RPC.getProxy(TestRpcService.class, 0, addr, conf);

+ 20 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java

@@ -62,7 +62,6 @@ public class TestIPC {
   final private static Configuration conf = new Configuration();
   final private static Configuration conf = new Configuration();
   final static private int PING_INTERVAL = 1000;
   final static private int PING_INTERVAL = 1000;
   final static private int MIN_SLEEP_TIME = 1000;
   final static private int MIN_SLEEP_TIME = 1000;
-
   /**
   /**
    * Flag used to turn off the fault injection behavior
    * Flag used to turn off the fault injection behavior
    * of the various writables.
    * of the various writables.
@@ -499,6 +498,26 @@ public class TestIPC {
     client.call(new LongWritable(RANDOM.nextLong()),
     client.call(new LongWritable(RANDOM.nextLong()),
         addr, null, null, 3*PING_INTERVAL+MIN_SLEEP_TIME, conf);
         addr, null, null, 3*PING_INTERVAL+MIN_SLEEP_TIME, conf);
   }
   }
+
+  @Test
+  public void testIpcConnectTimeout() throws Exception {
+    // start server
+    Server server = new TestServer(1, true);
+    InetSocketAddress addr = NetUtils.getConnectAddress(server);
+    //Intentionally do not start server to get a connection timeout
+
+    // start client
+    Client.setConnectTimeout(conf, 100);
+    Client client = new Client(LongWritable.class, conf);
+    // set the rpc timeout to twice the MIN_SLEEP_TIME
+    try {
+      client.call(new LongWritable(RANDOM.nextLong()),
+              addr, null, null, MIN_SLEEP_TIME*2, conf);
+      fail("Expected an exception to have been thrown");
+    } catch (SocketTimeoutException e) {
+      LOG.info("Get a SocketTimeoutException ", e);
+    }
+  }
   
   
   /**
   /**
    * Check that file descriptors aren't leaked by starting
    * Check that file descriptors aren't leaked by starting

+ 13 - 0
hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier

@@ -1,2 +1,15 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
 org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier
 org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier
 org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier
 org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier

+ 0 - 3
hadoop-common-project/pom.xml

@@ -49,9 +49,6 @@
         <groupId>org.apache.rat</groupId>
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>
         <configuration>
-          <includes>
-            <include>pom.xml</include>
-          </includes>
         </configuration>
         </configuration>
       </plugin>
       </plugin>
     </plugins>
     </plugins>

+ 0 - 3
hadoop-dist/pom.xml

@@ -66,9 +66,6 @@
         <groupId>org.apache.rat</groupId>
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>
         <configuration>
-          <includes>
-            <include>pom.xml</include>
-          </includes>
         </configuration>
         </configuration>
       </plugin>
       </plugin>
     </plugins>
     </plugins>

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -359,6 +359,8 @@
         <artifactId>apache-rat-plugin</artifactId>
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>
         <configuration>
           <excludes>
           <excludes>
+            <exclude>src/test/resources/classutils.txt</exclude>
+            <exclude>src/main/conf/httpfs-signature.secret</exclude>
           </excludes>
           </excludes>
         </configuration>
         </configuration>
       </plugin>
       </plugin>

+ 17 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java

@@ -29,6 +29,9 @@ import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
 import javax.servlet.ServletResponse;
 import java.io.IOException;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetAddress;
+import java.net.UnknownHostException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 /**
 /**
  * Filter that resolves the requester hostname.
  * Filter that resolves the requester hostname.
@@ -36,6 +39,7 @@ import java.net.InetAddress;
 @InterfaceAudience.Private
 @InterfaceAudience.Private
 public class HostnameFilter implements Filter {
 public class HostnameFilter implements Filter {
   static final ThreadLocal<String> HOSTNAME_TL = new ThreadLocal<String>();
   static final ThreadLocal<String> HOSTNAME_TL = new ThreadLocal<String>();
+  private static final Logger log = LoggerFactory.getLogger(HostnameFilter.class);
 
 
   /**
   /**
    * Initializes the filter.
    * Initializes the filter.
@@ -66,7 +70,19 @@ public class HostnameFilter implements Filter {
   public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
   public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
     throws IOException, ServletException {
     throws IOException, ServletException {
     try {
     try {
-      String hostname = InetAddress.getByName(request.getRemoteAddr()).getCanonicalHostName();
+      String hostname;
+      try {
+        String address = request.getRemoteAddr();
+        if (address != null) {
+          hostname = InetAddress.getByName(address).getCanonicalHostName();
+        } else {
+          log.warn("Request remote address is NULL");
+          hostname = "???";
+        }
+      } catch (UnknownHostException ex) {
+        log.warn("Request remote address could not be resolved, {0}", ex.toString(), ex);
+        hostname = "???";
+      }
       HOSTNAME_TL.set(hostname);
       HOSTNAME_TL.set(hostname);
       chain.doFilter(request, response);
       chain.doFilter(request, response);
     } finally {
     } finally {

+ 26 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java

@@ -64,4 +64,30 @@ public class TestHostnameFilter extends HTestCase {
     filter.destroy();
     filter.destroy();
   }
   }
 
 
+  @Test
+  public void testMissingHostname() throws Exception {
+    ServletRequest request = Mockito.mock(ServletRequest.class);
+    Mockito.when(request.getRemoteAddr()).thenReturn(null);
+
+    ServletResponse response = Mockito.mock(ServletResponse.class);
+
+    final AtomicBoolean invoked = new AtomicBoolean();
+
+    FilterChain chain = new FilterChain() {
+      @Override
+      public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
+        throws IOException, ServletException {
+        assertTrue(HostnameFilter.get().contains("???"));
+        invoked.set(true);
+      }
+    };
+
+    Filter filter = new HostnameFilter();
+    filter.init(null);
+    assertNull(HostnameFilter.get());
+    filter.doFilter(request, response, chain);
+    assertTrue(invoked.get());
+    assertNull(HostnameFilter.get());
+    filter.destroy();
+  }
 }
 }

+ 47 - 2
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -309,6 +309,11 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4367. GetDataEncryptionKeyResponseProto does not handle null
     HDFS-4367. GetDataEncryptionKeyResponseProto does not handle null
     response. (suresh)
     response. (suresh)
 
 
+    HDFS-4364. GetLinkTargetResponseProto does not handle null path. (suresh)
+
+    HDFS-4369. GetBlockKeysResponseProto does not handle null response.
+    (suresh)
+
   NEW FEATURES
   NEW FEATURES
 
 
     HDFS-2656. Add libwebhdfs, a pure C client based on WebHDFS.
     HDFS-2656. Add libwebhdfs, a pure C client based on WebHDFS.
@@ -480,8 +485,22 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4381. Document fsimage format details in FSImageFormat class javadoc.
     HDFS-4381. Document fsimage format details in FSImageFormat class javadoc.
     (Jing Zhao via suresh)
     (Jing Zhao via suresh)
 
 
+    HDFS-4375. Use token request messages defined in hadoop common.
+    (suresh)
+
+    HDFS-4392. Use NetUtils#getFreeSocketPort in MiniDFSCluster.
+    (Andrew Purtell via suresh)
+
+    HDFS-4393. Make empty request and responses in protocol translators can be
+    static final members. (Brandon Li via suresh)
+
+    HDFS-4403. DFSClient can infer checksum type when not provided by reading
+    first byte (todd)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
+    HDFS-3429. DataNode reads checksums even if client does not need them (todd)
+
   BUG FIXES
   BUG FIXES
 
 
     HDFS-3919. MiniDFSCluster:waitClusterUp can hang forever.
     HDFS-3919. MiniDFSCluster:waitClusterUp can hang forever.
@@ -703,6 +722,12 @@ Release 2.0.3-alpha - Unreleased
 
 
     HDFS-1245. Pluggable block id generation. (shv)
     HDFS-1245. Pluggable block id generation. (shv)
 
 
+    HDFS-4415. HostnameFilter should handle hostname resolution failures and
+    continue processing. (Robert Kanter via atm)
+
+    HDFS-4359. Slow RPC responses from NN can prevent metrics collection on
+    DNs. (liang xie via atm)
+
   BREAKDOWN OF HDFS-3077 SUBTASKS
   BREAKDOWN OF HDFS-3077 SUBTASKS
 
 
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.
@@ -805,9 +830,12 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4017. Unclosed FileInputStream in GetJournalEditServlet
     HDFS-4017. Unclosed FileInputStream in GetJournalEditServlet
     (Chao Shi via todd)
     (Chao Shi via todd)
 
 
-    HDFS-4351.  In BlockPlacementPolicyDefault.chooseTarget(..), numOfReplicas
+    HDFS-4351. In BlockPlacementPolicyDefault.chooseTarget(..), numOfReplicas
     needs to be updated when avoiding stale nodes.  (Andrew Wang via szetszwo)
     needs to be updated when avoiding stale nodes.  (Andrew Wang via szetszwo)
 
 
+    HDFS-4399. Fix RAT warnings by excluding images sub-dir in docs. (Thomas
+    Graves via acmurthy) 
+
 Release 2.0.2-alpha - 2012-09-07 
 Release 2.0.2-alpha - 2012-09-07 
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
@@ -2185,6 +2213,18 @@ Release 2.0.0-alpha - 05-23-2012
     
     
     HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm)
     HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm)
 
 
+Release 0.23.7 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 0.23.6 - UNRELEASED
 Release 0.23.6 - UNRELEASED
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
@@ -2202,7 +2242,12 @@ Release 0.23.6 - UNRELEASED
     HDFS-4248. Renaming directories may incorrectly remove the paths in leases
     HDFS-4248. Renaming directories may incorrectly remove the paths in leases
     under the tree.  (daryn via szetszwo)
     under the tree.  (daryn via szetszwo)
 
 
-Release 0.23.5 - UNRELEASED
+    HDFS-4385. Maven RAT plugin is not checking all source files (tgraves)
+
+    HDFS-4426. Secondary namenode shuts down immediately after startup.
+    (Arpit Agarwal via suresh)
+
+Release 0.23.5 - 2012-11-28
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
 
 

+ 27 - 0
hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt

@@ -242,3 +242,30 @@ For the org.apache.hadoop.util.bloom.* classes:
  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
  * POSSIBILITY OF SUCH DAMAGE.
  * POSSIBILITY OF SUCH DAMAGE.
  */
  */
+
+For src/main/native/util/tree.h:
+
+/*-
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -516,9 +516,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <exclude>src/test/resources/data*</exclude>
             <exclude>src/test/resources/data*</exclude>
             <exclude>src/test/resources/editsStored*</exclude>
             <exclude>src/test/resources/editsStored*</exclude>
             <exclude>src/test/resources/empty-file</exclude>
             <exclude>src/test/resources/empty-file</exclude>
+            <exclude>src/main/native/util/tree.h</exclude>
+            <exclude>src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj</exclude>
             <exclude>src/main/webapps/datanode/robots.txt</exclude>
             <exclude>src/main/webapps/datanode/robots.txt</exclude>
             <exclude>src/main/docs/releasenotes.html</exclude>
             <exclude>src/main/docs/releasenotes.html</exclude>
             <exclude>src/contrib/**</exclude>
             <exclude>src/contrib/**</exclude>
+            <exclude>src/site/resources/images/*</exclude>
           </excludes>
           </excludes>
         </configuration>
         </configuration>
       </plugin>
       </plugin>
@@ -563,6 +566,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                     <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
                     <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
                       <arg line="VERBOSE=1"/>
                       <arg line="VERBOSE=1"/>
                     </exec>
                     </exec>
+                    <!-- The second make is a workaround for HADOOP-9215.  It can
+                         be removed when version 2.6 of cmake is no longer supported. -->
+                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true"></exec>
                   </target>
                   </target>
                 </configuration>
                 </configuration>
               </execution>
               </execution>

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake

@@ -1,3 +1,20 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 #ifndef CONFIG_H
 #ifndef CONFIG_H
 #define CONFIG_H
 #define CONFIG_H
 
 

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml

@@ -92,10 +92,11 @@ There is no provision within HDFS for creating user identities, establishing gro
 
 
 <section><title>Group Mapping</title>
 <section><title>Group Mapping</title>
 <p>
 <p>
-Once a username has been determined as described above, the list of groups is determined by a <em>group mapping
-service</em>, configured by the <code>hadoop.security.group.mapping</code> property.
-The default implementation, <code>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</code>, will shell out
-to the Unix <code>bash -c groups</code> command to resolve a list of groups for a user.
+Once a username has been determined as described above, the list of groups is 
+determined by a <em>group mapping service</em>, configured by the 
+<code>hadoop.security.group.mapping</code> property. Refer to the 
+core-default.xml for details of the <code>hadoop.security.group.mapping</code>
+implementation.
 </p>
 </p>
 <p>
 <p>
 An alternate implementation, which connects directly to an LDAP server to resolve the list of groups, is available
 An alternate implementation, which connects directly to an LDAP server to resolve the list of groups, is available

+ 126 - 38
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -152,6 +152,7 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenRenewer;
 import org.apache.hadoop.security.token.TokenRenewer;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
 
 
@@ -1571,7 +1572,7 @@ public class DFSClient implements java.io.Closeable {
    */
    */
   public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
   public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
     checkOpen();
     checkOpen();
-    return getFileChecksum(src, namenode, socketFactory,
+    return getFileChecksum(src, clientName, namenode, socketFactory,
         dfsClientConf.socketTimeout, getDataEncryptionKey(),
         dfsClientConf.socketTimeout, getDataEncryptionKey(),
         dfsClientConf.connectToDnViaHostname);
         dfsClientConf.connectToDnViaHostname);
   }
   }
@@ -1614,9 +1615,16 @@ public class DFSClient implements java.io.Closeable {
   /**
   /**
    * Get the checksum of a file.
    * Get the checksum of a file.
    * @param src The file path
    * @param src The file path
+   * @param clientName the name of the client requesting the checksum.
+   * @param namenode the RPC proxy for the namenode
+   * @param socketFactory to create sockets to connect to DNs
+   * @param socketTimeout timeout to use when connecting and waiting for a response
+   * @param encryptionKey the key needed to communicate with DNs in this cluster
+   * @param connectToDnViaHostname {@see #connectToDnViaHostname()}
    * @return The checksum 
    * @return The checksum 
    */
    */
-  public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
+  static MD5MD5CRC32FileChecksum getFileChecksum(String src,
+      String clientName,
       ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout,
       ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout,
       DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
       DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
       throws IOException {
       throws IOException {
@@ -1651,32 +1659,16 @@ public class DFSClient implements java.io.Closeable {
       final int timeout = 3000 * datanodes.length + socketTimeout;
       final int timeout = 3000 * datanodes.length + socketTimeout;
       boolean done = false;
       boolean done = false;
       for(int j = 0; !done && j < datanodes.length; j++) {
       for(int j = 0; !done && j < datanodes.length; j++) {
-        Socket sock = null;
         DataOutputStream out = null;
         DataOutputStream out = null;
         DataInputStream in = null;
         DataInputStream in = null;
         
         
         try {
         try {
           //connect to a datanode
           //connect to a datanode
-          sock = socketFactory.createSocket();
-          String dnAddr = datanodes[j].getXferAddr(connectToDnViaHostname);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Connecting to datanode " + dnAddr);
-          }
-          NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
-          sock.setSoTimeout(timeout);
-
-          OutputStream unbufOut = NetUtils.getOutputStream(sock);
-          InputStream unbufIn = NetUtils.getInputStream(sock);
-          if (encryptionKey != null) {
-            IOStreamPair encryptedStreams =
-                DataTransferEncryptor.getEncryptedStreams(
-                    unbufOut, unbufIn, encryptionKey);
-            unbufOut = encryptedStreams.out;
-            unbufIn = encryptedStreams.in;
-          }
-          out = new DataOutputStream(new BufferedOutputStream(unbufOut,
+          IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
+              encryptionKey, datanodes[j], timeout);
+          out = new DataOutputStream(new BufferedOutputStream(pair.out,
               HdfsConstants.SMALL_BUFFER_SIZE));
               HdfsConstants.SMALL_BUFFER_SIZE));
-          in = new DataInputStream(unbufIn);
+          in = new DataInputStream(pair.in);
 
 
           if (LOG.isDebugEnabled()) {
           if (LOG.isDebugEnabled()) {
             LOG.debug("write to " + datanodes[j] + ": "
             LOG.debug("write to " + datanodes[j] + ": "
@@ -1689,19 +1681,8 @@ public class DFSClient implements java.io.Closeable {
             BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
             BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
 
 
           if (reply.getStatus() != Status.SUCCESS) {
           if (reply.getStatus() != Status.SUCCESS) {
-            if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN
-                && i > lastRetriedIndex) {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
-                    + "for file " + src + " for block " + block
-                    + " from datanode " + datanodes[j]
-                    + ". Will retry the block once.");
-              }
-              lastRetriedIndex = i;
-              done = true; // actually it's not done; but we'll retry
-              i--; // repeat at i-th block
-              refetchBlocks = true;
-              break;
+            if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
+              throw new InvalidBlockTokenException();
             } else {
             } else {
               throw new IOException("Bad response " + reply + " for block "
               throw new IOException("Bad response " + reply + " for block "
                   + block + " from datanode " + datanodes[j]);
                   + block + " from datanode " + datanodes[j]);
@@ -1733,8 +1714,18 @@ public class DFSClient implements java.io.Closeable {
           md5.write(md5out);
           md5.write(md5out);
           
           
           // read crc-type
           // read crc-type
-          final DataChecksum.Type ct = PBHelper.convert(checksumData
-              .getCrcType());
+          final DataChecksum.Type ct;
+          if (checksumData.hasCrcType()) {
+            ct = PBHelper.convert(checksumData
+                .getCrcType());
+          } else {
+            LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
+                      "inferring checksum by reading first byte");
+            ct = inferChecksumTypeByReading(
+                clientName, socketFactory, socketTimeout, lb, datanodes[j],
+                encryptionKey, connectToDnViaHostname);
+          }
+
           if (i == 0) { // first block
           if (i == 0) { // first block
             crcType = ct;
             crcType = ct;
           } else if (crcType != DataChecksum.Type.MIXED
           } else if (crcType != DataChecksum.Type.MIXED
@@ -1752,12 +1743,25 @@ public class DFSClient implements java.io.Closeable {
             }
             }
             LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
             LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
           }
           }
+        } catch (InvalidBlockTokenException ibte) {
+          if (i > lastRetriedIndex) {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
+                  + "for file " + src + " for block " + block
+                  + " from datanode " + datanodes[j]
+                  + ". Will retry the block once.");
+            }
+            lastRetriedIndex = i;
+            done = true; // actually it's not done; but we'll retry
+            i--; // repeat at i-th block
+            refetchBlocks = true;
+            break;
+          }
         } catch (IOException ie) {
         } catch (IOException ie) {
           LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
           LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
         } finally {
         } finally {
           IOUtils.closeStream(in);
           IOUtils.closeStream(in);
           IOUtils.closeStream(out);
           IOUtils.closeStream(out);
-          IOUtils.closeSocket(sock);        
         }
         }
       }
       }
 
 
@@ -1789,6 +1793,90 @@ public class DFSClient implements java.io.Closeable {
     }
     }
   }
   }
 
 
+  /**
+   * Connect to the given datanode's datantrasfer port, and return
+   * the resulting IOStreamPair. This includes encryption wrapping, etc.
+   */
+  private static IOStreamPair connectToDN(
+      SocketFactory socketFactory, boolean connectToDnViaHostname,
+      DataEncryptionKey encryptionKey, DatanodeInfo dn, int timeout)
+      throws IOException
+  {
+    boolean success = false;
+    Socket sock = null;
+    try {
+      sock = socketFactory.createSocket();
+      String dnAddr = dn.getXferAddr(connectToDnViaHostname);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Connecting to datanode " + dnAddr);
+      }
+      NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
+      sock.setSoTimeout(timeout);
+  
+      OutputStream unbufOut = NetUtils.getOutputStream(sock);
+      InputStream unbufIn = NetUtils.getInputStream(sock);
+      IOStreamPair ret;
+      if (encryptionKey != null) {
+        ret = DataTransferEncryptor.getEncryptedStreams(
+                unbufOut, unbufIn, encryptionKey);
+      } else {
+        ret = new IOStreamPair(unbufIn, unbufOut);        
+      }
+      success = true;
+      return ret;
+    } finally {
+      if (!success) {
+        IOUtils.closeSocket(sock);
+      }
+    }
+  }
+  
+  /**
+   * Infer the checksum type for a replica by sending an OP_READ_BLOCK
+   * for the first byte of that replica. This is used for compatibility
+   * with older HDFS versions which did not include the checksum type in
+   * OpBlockChecksumResponseProto.
+   *
+   * @param in input stream from datanode
+   * @param out output stream to datanode
+   * @param lb the located block
+   * @param clientName the name of the DFSClient requesting the checksum
+   * @param dn the connected datanode
+   * @return the inferred checksum type
+   * @throws IOException if an error occurs
+   */
+  private static Type inferChecksumTypeByReading(
+      String clientName, SocketFactory socketFactory, int socketTimeout,
+      LocatedBlock lb, DatanodeInfo dn,
+      DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
+      throws IOException {
+    IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
+        encryptionKey, dn, socketTimeout);
+
+    try {
+      DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
+          HdfsConstants.SMALL_BUFFER_SIZE));
+      DataInputStream in = new DataInputStream(pair.in);
+  
+      new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true);
+      final BlockOpResponseProto reply =
+          BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
+      
+      if (reply.getStatus() != Status.SUCCESS) {
+        if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
+          throw new InvalidBlockTokenException();
+        } else {
+          throw new IOException("Bad response " + reply + " trying to read "
+              + lb.getBlock() + " from datanode " + dn);
+        }
+      }
+      
+      return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
+    } finally {
+      IOUtils.cleanup(null, pair.in, pair.out);
+    }
+  }
+
   /**
   /**
    * Set permissions to a file or directory.
    * Set permissions to a file or directory.
    * @param src path name.
    * @param src path name.

+ 12 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java

@@ -40,14 +40,18 @@ import org.apache.hadoop.tools.GetUserMappingsProtocol;
 public class HDFSPolicyProvider extends PolicyProvider {
 public class HDFSPolicyProvider extends PolicyProvider {
   private static final Service[] hdfsServices =
   private static final Service[] hdfsServices =
     new Service[] {
     new Service[] {
-    new Service("security.client.protocol.acl", ClientProtocol.class),
-    new Service("security.client.datanode.protocol.acl", 
-                ClientDatanodeProtocol.class),
-    new Service("security.datanode.protocol.acl", DatanodeProtocol.class),
-    new Service("security.inter.datanode.protocol.acl", 
-                InterDatanodeProtocol.class),
-    new Service("security.namenode.protocol.acl", NamenodeProtocol.class),
-    new Service("security.qjournal.service.protocol.acl", QJournalProtocol.class),
+    new Service(CommonConfigurationKeys.SECURITY_CLIENT_PROTOCOL_ACL,
+        ClientProtocol.class),
+    new Service(CommonConfigurationKeys.SECURITY_CLIENT_DATANODE_PROTOCOL_ACL,
+        ClientDatanodeProtocol.class),
+    new Service(CommonConfigurationKeys.SECURITY_DATANODE_PROTOCOL_ACL,
+        DatanodeProtocol.class),
+    new Service(CommonConfigurationKeys.SECURITY_INTER_DATANODE_PROTOCOL_ACL, 
+        InterDatanodeProtocol.class),
+    new Service(CommonConfigurationKeys.SECURITY_NAMENODE_PROTOCOL_ACL,
+        NamenodeProtocol.class),
+    new Service(CommonConfigurationKeys.SECURITY_QJOURNAL_SERVICE_PROTOCOL_ACL,
+        QJournalProtocol.class),
     new Service(CommonConfigurationKeys.SECURITY_HA_SERVICE_PROTOCOL_ACL,
     new Service(CommonConfigurationKeys.SECURITY_HA_SERVICE_PROTOCOL_ACL,
         HAServiceProtocol.class),
         HAServiceProtocol.class),
     new Service(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL,
     new Service(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL,

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java

@@ -380,7 +380,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
     // in and out will be closed when sock is closed (by the caller)
     // in and out will be closed when sock is closed (by the caller)
     final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
     final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
           NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT)));
           NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT)));
-    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len);
+    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
+        verifyChecksum);
     
     
     //
     //
     // Get bytes in block, set streams
     // Get bytes in block, set streams

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java

@@ -392,7 +392,8 @@ public class RemoteBlockReader2  implements BlockReader {
     // in and out will be closed when sock is closed (by the caller)
     // in and out will be closed when sock is closed (by the caller)
     final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
     final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
           ioStreams.out));
           ioStreams.out));
-    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len);
+    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
+        verifyChecksum);
 
 
     //
     //
     // Get bytes in block
     // Get bytes in block

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java

@@ -55,12 +55,15 @@ public interface DataTransferProtocol {
    * @param clientName client's name.
    * @param clientName client's name.
    * @param blockOffset offset of the block.
    * @param blockOffset offset of the block.
    * @param length maximum number of bytes for this read.
    * @param length maximum number of bytes for this read.
+   * @param sendChecksum if false, the DN should skip reading and sending
+   *        checksums
    */
    */
   public void readBlock(final ExtendedBlock blk,
   public void readBlock(final ExtendedBlock blk,
       final Token<BlockTokenIdentifier> blockToken,
       final Token<BlockTokenIdentifier> blockToken,
       final String clientName,
       final String clientName,
       final long blockOffset,
       final long blockOffset,
-      final long length) throws IOException;
+      final long length,
+      final boolean sendChecksum) throws IOException;
 
 
   /**
   /**
    * Write a block to a datanode pipeline.
    * Write a block to a datanode pipeline.

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java

@@ -88,7 +88,8 @@ public abstract class Receiver implements DataTransferProtocol {
         PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
         PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
         proto.getHeader().getClientName(),
         proto.getHeader().getClientName(),
         proto.getOffset(),
         proto.getOffset(),
-        proto.getLen());
+        proto.getLen(),
+        proto.getSendChecksums());
   }
   }
   
   
   /** Receive OP_WRITE_BLOCK */
   /** Receive OP_WRITE_BLOCK */

+ 7 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java

@@ -62,6 +62,10 @@ public class Sender implements DataTransferProtocol {
 
 
   private static void send(final DataOutputStream out, final Op opcode,
   private static void send(final DataOutputStream out, final Op opcode,
       final Message proto) throws IOException {
       final Message proto) throws IOException {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Sending DataTransferOp " + proto.getClass().getSimpleName()
+          + ": " + proto);
+    }
     op(out, opcode);
     op(out, opcode);
     proto.writeDelimitedTo(out);
     proto.writeDelimitedTo(out);
     out.flush();
     out.flush();
@@ -72,12 +76,14 @@ public class Sender implements DataTransferProtocol {
       final Token<BlockTokenIdentifier> blockToken,
       final Token<BlockTokenIdentifier> blockToken,
       final String clientName,
       final String clientName,
       final long blockOffset,
       final long blockOffset,
-      final long length) throws IOException {
+      final long length,
+      final boolean sendChecksum) throws IOException {
 
 
     OpReadBlockProto proto = OpReadBlockProto.newBuilder()
     OpReadBlockProto proto = OpReadBlockProto.newBuilder()
       .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken))
       .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken))
       .setOffset(blockOffset)
       .setOffset(blockOffset)
       .setLen(length)
       .setLen(length)
+      .setSendChecksums(sendChecksum)
       .build();
       .build();
 
 
     send(out, Op.READ_BLOCK, proto);
     send(out, Op.READ_BLOCK, proto);

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java

@@ -77,7 +77,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
   /** RpcController is not used and hence is set to null */
   /** RpcController is not used and hence is set to null */
   private final static RpcController NULL_CONTROLLER = null;
   private final static RpcController NULL_CONTROLLER = null;
   private final ClientDatanodeProtocolPB rpcProxy;
   private final ClientDatanodeProtocolPB rpcProxy;
-  private final static RefreshNamenodesRequestProto REFRESH_NAMENODES = 
+  private final static RefreshNamenodesRequestProto VOID_REFRESH_NAMENODES = 
       RefreshNamenodesRequestProto.newBuilder().build();
       RefreshNamenodesRequestProto.newBuilder().build();
 
 
   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
@@ -170,7 +170,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
   @Override
   @Override
   public void refreshNamenodes() throws IOException {
   public void refreshNamenodes() throws IOException {
     try {
     try {
-      rpcProxy.refreshNamenodes(NULL_CONTROLLER, REFRESH_NAMENODES);
+      rpcProxy.refreshNamenodes(NULL_CONTROLLER, VOID_REFRESH_NAMENODES);
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }

+ 89 - 81
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -40,8 +40,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowS
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
@@ -73,8 +71,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCon
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
@@ -107,8 +103,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto;
@@ -143,6 +137,12 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
+import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
 
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.RpcController;
@@ -171,6 +171,78 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   static final DisallowSnapshotResponseProto VOID_DISALLOW_SNAPSHOT_RESPONSE =
   static final DisallowSnapshotResponseProto VOID_DISALLOW_SNAPSHOT_RESPONSE =
       DisallowSnapshotResponseProto.newBuilder().build();
       DisallowSnapshotResponseProto.newBuilder().build();
 
 
+  private static final CreateResponseProto VOID_CREATE_RESPONSE = 
+  CreateResponseProto.newBuilder().build();
+
+  private static final AppendResponseProto VOID_APPEND_RESPONSE = 
+  AppendResponseProto.newBuilder().build();
+
+  private static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE = 
+  SetPermissionResponseProto.newBuilder().build();
+
+  private static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE = 
+  SetOwnerResponseProto.newBuilder().build();
+
+  private static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE = 
+  AbandonBlockResponseProto.newBuilder().build();
+
+  private static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE = 
+  ReportBadBlocksResponseProto.newBuilder().build();
+
+  private static final ConcatResponseProto VOID_CONCAT_RESPONSE = 
+  ConcatResponseProto.newBuilder().build();
+
+  private static final Rename2ResponseProto VOID_RENAME2_RESPONSE = 
+  Rename2ResponseProto.newBuilder().build();
+
+  private static final GetListingResponseProto VOID_GETLISTING_RESPONSE = 
+  GetListingResponseProto.newBuilder().build();
+
+  private static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE = 
+  RenewLeaseResponseProto.newBuilder().build();
+
+  private static final SaveNamespaceResponseProto VOID_SAVENAMESPACE_RESPONSE = 
+  SaveNamespaceResponseProto.newBuilder().build();
+
+  private static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE = 
+  RefreshNodesResponseProto.newBuilder().build();
+
+  private static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE = 
+  FinalizeUpgradeResponseProto.newBuilder().build();
+
+  private static final MetaSaveResponseProto VOID_METASAVE_RESPONSE = 
+  MetaSaveResponseProto.newBuilder().build();
+
+  private static final GetFileInfoResponseProto VOID_GETFILEINFO_RESPONSE = 
+  GetFileInfoResponseProto.newBuilder().build();
+
+  private static final GetFileLinkInfoResponseProto VOID_GETFILELINKINFO_RESPONSE = 
+  GetFileLinkInfoResponseProto.newBuilder().build();
+
+  private static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE = 
+  SetQuotaResponseProto.newBuilder().build();
+
+  private static final FsyncResponseProto VOID_FSYNC_RESPONSE = 
+  FsyncResponseProto.newBuilder().build();
+
+  private static final SetTimesResponseProto VOID_SETTIMES_RESPONSE = 
+  SetTimesResponseProto.newBuilder().build();
+
+  private static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE = 
+  CreateSymlinkResponseProto.newBuilder().build();
+
+  private static final UpdatePipelineResponseProto
+    VOID_UPDATEPIPELINE_RESPONSE = 
+  UpdatePipelineResponseProto.newBuilder().build();
+
+  private static final CancelDelegationTokenResponseProto 
+      VOID_CANCELDELEGATIONTOKEN_RESPONSE = 
+          CancelDelegationTokenResponseProto.newBuilder().build();
+
+  private static final SetBalancerBandwidthResponseProto 
+      VOID_SETBALANCERBANDWIDTH_RESPONSE = 
+        SetBalancerBandwidthResponseProto.newBuilder().build();
+
   /**
   /**
    * Constructor
    * Constructor
    * 
    * 
@@ -215,9 +287,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
   }
 
 
   
   
-  static final CreateResponseProto VOID_CREATE_RESPONSE = 
-      CreateResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public CreateResponseProto create(RpcController controller,
   public CreateResponseProto create(RpcController controller,
       CreateRequestProto req) throws ServiceException {
       CreateRequestProto req) throws ServiceException {
@@ -232,9 +301,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     return VOID_CREATE_RESPONSE;
     return VOID_CREATE_RESPONSE;
   }
   }
   
   
-  static final AppendResponseProto NULL_APPEND_RESPONSE = 
-      AppendResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public AppendResponseProto append(RpcController controller,
   public AppendResponseProto append(RpcController controller,
       AppendRequestProto req) throws ServiceException {
       AppendRequestProto req) throws ServiceException {
@@ -244,7 +310,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
         return AppendResponseProto.newBuilder()
         return AppendResponseProto.newBuilder()
             .setBlock(PBHelper.convert(result)).build();
             .setBlock(PBHelper.convert(result)).build();
       }
       }
-      return NULL_APPEND_RESPONSE;
+      return VOID_APPEND_RESPONSE;
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
@@ -263,9 +329,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
   }
 
 
 
 
-  static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE = 
-      SetPermissionResponseProto.newBuilder().build();
-
   @Override
   @Override
   public SetPermissionResponseProto setPermission(RpcController controller,
   public SetPermissionResponseProto setPermission(RpcController controller,
       SetPermissionRequestProto req) throws ServiceException {
       SetPermissionRequestProto req) throws ServiceException {
@@ -277,9 +340,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     return VOID_SET_PERM_RESPONSE;
     return VOID_SET_PERM_RESPONSE;
   }
   }
 
 
-  static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE = 
-      SetOwnerResponseProto.newBuilder().build();
-
   @Override
   @Override
   public SetOwnerResponseProto setOwner(RpcController controller,
   public SetOwnerResponseProto setOwner(RpcController controller,
       SetOwnerRequestProto req) throws ServiceException {
       SetOwnerRequestProto req) throws ServiceException {
@@ -293,9 +353,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     return VOID_SET_OWNER_RESPONSE;
     return VOID_SET_OWNER_RESPONSE;
   }
   }
 
 
-  static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE = 
-      AbandonBlockResponseProto.newBuilder().build();
-
   @Override
   @Override
   public AbandonBlockResponseProto abandonBlock(RpcController controller,
   public AbandonBlockResponseProto abandonBlock(RpcController controller,
       AbandonBlockRequestProto req) throws ServiceException {
       AbandonBlockRequestProto req) throws ServiceException {
@@ -361,9 +418,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
   
   
-  static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE = 
-      ReportBadBlocksResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
   public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
       ReportBadBlocksRequestProto req) throws ServiceException {
       ReportBadBlocksRequestProto req) throws ServiceException {
@@ -377,9 +431,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     return VOID_REP_BAD_BLOCK_RESPONSE;
     return VOID_REP_BAD_BLOCK_RESPONSE;
   }
   }
 
 
-  static final ConcatResponseProto VOID_CONCAT_RESPONSE = 
-      ConcatResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public ConcatResponseProto concat(RpcController controller,
   public ConcatResponseProto concat(RpcController controller,
       ConcatRequestProto req) throws ServiceException {
       ConcatRequestProto req) throws ServiceException {
@@ -403,9 +454,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final Rename2ResponseProto VOID_RENAME2_RESPONSE = 
-      Rename2ResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public Rename2ResponseProto rename2(RpcController controller,
   public Rename2ResponseProto rename2(RpcController controller,
       Rename2RequestProto req) throws ServiceException {
       Rename2RequestProto req) throws ServiceException {
@@ -442,8 +490,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final GetListingResponseProto NULL_GETLISTING_RESPONSE = 
-      GetListingResponseProto.newBuilder().build();
   @Override
   @Override
   public GetListingResponseProto getListing(RpcController controller,
   public GetListingResponseProto getListing(RpcController controller,
       GetListingRequestProto req) throws ServiceException {
       GetListingRequestProto req) throws ServiceException {
@@ -455,16 +501,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
         return GetListingResponseProto.newBuilder().setDirList(
         return GetListingResponseProto.newBuilder().setDirList(
           PBHelper.convert(result)).build();
           PBHelper.convert(result)).build();
       } else {
       } else {
-        return NULL_GETLISTING_RESPONSE;
+        return VOID_GETLISTING_RESPONSE;
       }
       }
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
   }
   }
   
   
-  static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE = 
-      RenewLeaseResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public RenewLeaseResponseProto renewLease(RpcController controller,
   public RenewLeaseResponseProto renewLease(RpcController controller,
       RenewLeaseRequestProto req) throws ServiceException {
       RenewLeaseRequestProto req) throws ServiceException {
@@ -549,9 +592,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
   
   
-  static final SaveNamespaceResponseProto VOID_SAVENAMESPACE_RESPONSE = 
-      SaveNamespaceResponseProto.newBuilder().build();
-
   @Override
   @Override
   public SaveNamespaceResponseProto saveNamespace(RpcController controller,
   public SaveNamespaceResponseProto saveNamespace(RpcController controller,
       SaveNamespaceRequestProto req) throws ServiceException {
       SaveNamespaceRequestProto req) throws ServiceException {
@@ -578,9 +618,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
   }
 
 
 
 
-  static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE = 
-      RefreshNodesResponseProto.newBuilder().build();
-
   @Override
   @Override
   public RefreshNodesResponseProto refreshNodes(RpcController controller,
   public RefreshNodesResponseProto refreshNodes(RpcController controller,
       RefreshNodesRequestProto req) throws ServiceException {
       RefreshNodesRequestProto req) throws ServiceException {
@@ -593,9 +630,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
 
 
   }
   }
 
 
-  static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE = 
-      FinalizeUpgradeResponseProto.newBuilder().build();
-
   @Override
   @Override
   public FinalizeUpgradeResponseProto finalizeUpgrade(RpcController controller,
   public FinalizeUpgradeResponseProto finalizeUpgrade(RpcController controller,
       FinalizeUpgradeRequestProto req) throws ServiceException {
       FinalizeUpgradeRequestProto req) throws ServiceException {
@@ -622,9 +656,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final MetaSaveResponseProto VOID_METASAVE_RESPONSE = 
-      MetaSaveResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public MetaSaveResponseProto metaSave(RpcController controller,
   public MetaSaveResponseProto metaSave(RpcController controller,
       MetaSaveRequestProto req) throws ServiceException {
       MetaSaveRequestProto req) throws ServiceException {
@@ -637,8 +668,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
 
 
   }
   }
 
 
-  static final GetFileInfoResponseProto NULL_GETFILEINFO_RESPONSE = 
-      GetFileInfoResponseProto.newBuilder().build();
   @Override
   @Override
   public GetFileInfoResponseProto getFileInfo(RpcController controller,
   public GetFileInfoResponseProto getFileInfo(RpcController controller,
       GetFileInfoRequestProto req) throws ServiceException {
       GetFileInfoRequestProto req) throws ServiceException {
@@ -649,14 +678,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
         return GetFileInfoResponseProto.newBuilder().setFs(
         return GetFileInfoResponseProto.newBuilder().setFs(
             PBHelper.convert(result)).build();
             PBHelper.convert(result)).build();
       }
       }
-      return NULL_GETFILEINFO_RESPONSE;      
+      return VOID_GETFILEINFO_RESPONSE;      
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
   }
   }
 
 
-  static final GetFileLinkInfoResponseProto NULL_GETFILELINKINFO_RESPONSE = 
-      GetFileLinkInfoResponseProto.newBuilder().build();
   @Override
   @Override
   public GetFileLinkInfoResponseProto getFileLinkInfo(RpcController controller,
   public GetFileLinkInfoResponseProto getFileLinkInfo(RpcController controller,
       GetFileLinkInfoRequestProto req) throws ServiceException {
       GetFileLinkInfoRequestProto req) throws ServiceException {
@@ -668,7 +695,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
             PBHelper.convert(result)).build();
             PBHelper.convert(result)).build();
       } else {
       } else {
         System.out.println("got  null result for getFileLinkInfo for " + req.getSrc());
         System.out.println("got  null result for getFileLinkInfo for " + req.getSrc());
-        return NULL_GETFILELINKINFO_RESPONSE;      
+        return VOID_GETFILELINKINFO_RESPONSE;      
       }
       }
 
 
     } catch (IOException e) {
     } catch (IOException e) {
@@ -689,9 +716,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
   
   
-  static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE = 
-      SetQuotaResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public SetQuotaResponseProto setQuota(RpcController controller,
   public SetQuotaResponseProto setQuota(RpcController controller,
       SetQuotaRequestProto req) throws ServiceException {
       SetQuotaRequestProto req) throws ServiceException {
@@ -704,9 +728,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
   
   
-  static final FsyncResponseProto VOID_FSYNC_RESPONSE = 
-      FsyncResponseProto.newBuilder().build();
-
   @Override
   @Override
   public FsyncResponseProto fsync(RpcController controller,
   public FsyncResponseProto fsync(RpcController controller,
       FsyncRequestProto req) throws ServiceException {
       FsyncRequestProto req) throws ServiceException {
@@ -718,9 +739,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final SetTimesResponseProto VOID_SETTIMES_RESPONSE = 
-      SetTimesResponseProto.newBuilder().build();
-
   @Override
   @Override
   public SetTimesResponseProto setTimes(RpcController controller,
   public SetTimesResponseProto setTimes(RpcController controller,
       SetTimesRequestProto req) throws ServiceException {
       SetTimesRequestProto req) throws ServiceException {
@@ -732,9 +750,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE = 
-      CreateSymlinkResponseProto.newBuilder().build();
-
   @Override
   @Override
   public CreateSymlinkResponseProto createSymlink(RpcController controller,
   public CreateSymlinkResponseProto createSymlink(RpcController controller,
       CreateSymlinkRequestProto req) throws ServiceException {
       CreateSymlinkRequestProto req) throws ServiceException {
@@ -752,8 +767,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       GetLinkTargetRequestProto req) throws ServiceException {
       GetLinkTargetRequestProto req) throws ServiceException {
     try {
     try {
       String result = server.getLinkTarget(req.getPath());
       String result = server.getLinkTarget(req.getPath());
-      return GetLinkTargetResponseProto.newBuilder().setTargetPath(result)
-          .build();
+      GetLinkTargetResponseProto.Builder builder = GetLinkTargetResponseProto
+          .newBuilder();
+      if (result != null) {
+        builder.setTargetPath(result);
+      }
+      return builder.build();
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
@@ -774,9 +793,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final UpdatePipelineResponseProto VOID_UPDATEPIPELINE_RESPONSE = 
-      UpdatePipelineResponseProto.newBuilder().build();
-
   @Override
   @Override
   public UpdatePipelineResponseProto updatePipeline(RpcController controller,
   public UpdatePipelineResponseProto updatePipeline(RpcController controller,
       UpdatePipelineRequestProto req) throws ServiceException {
       UpdatePipelineRequestProto req) throws ServiceException {
@@ -818,16 +834,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       long result = server.renewDelegationToken(PBHelper
       long result = server.renewDelegationToken(PBHelper
           .convertDelegationToken(req.getToken()));
           .convertDelegationToken(req.getToken()));
       return RenewDelegationTokenResponseProto.newBuilder()
       return RenewDelegationTokenResponseProto.newBuilder()
-          .setNewExireTime(result).build();
+          .setNewExpiryTime(result).build();
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
   }
   }
 
 
-  static final CancelDelegationTokenResponseProto 
-      VOID_CANCELDELEGATIONTOKEN_RESPONSE = 
-      CancelDelegationTokenResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public CancelDelegationTokenResponseProto cancelDelegationToken(
   public CancelDelegationTokenResponseProto cancelDelegationToken(
       RpcController controller, CancelDelegationTokenRequestProto req)
       RpcController controller, CancelDelegationTokenRequestProto req)
@@ -841,10 +853,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final SetBalancerBandwidthResponseProto 
-    VOID_SETBALANCERBANDWIDTH_RESPONSE = 
-      SetBalancerBandwidthResponseProto.newBuilder().build();
-
   @Override
   @Override
   public SetBalancerBandwidthResponseProto setBalancerBandwidth(
   public SetBalancerBandwidthResponseProto setBalancerBandwidth(
       RpcController controller, SetBalancerBandwidthRequestProto req)
       RpcController controller, SetBalancerBandwidthRequestProto req)

+ 42 - 23
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlo
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
@@ -70,14 +69,13 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCon
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
@@ -92,7 +90,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Refres
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto;
@@ -120,6 +117,10 @@ import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
+import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
 
 
 import com.google.protobuf.ByteString;
 import com.google.protobuf.ByteString;
@@ -136,6 +137,29 @@ public class ClientNamenodeProtocolTranslatorPB implements
     ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator {
     ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator {
   final private ClientNamenodeProtocolPB rpcProxy;
   final private ClientNamenodeProtocolPB rpcProxy;
 
 
+  static final GetServerDefaultsRequestProto VOID_GET_SERVER_DEFAULT_REQUEST = 
+  GetServerDefaultsRequestProto.newBuilder().build();
+
+  private final static GetFsStatusRequestProto VOID_GET_FSSTATUS_REQUEST =
+  GetFsStatusRequestProto.newBuilder().build();
+
+  private final static SaveNamespaceRequestProto VOID_SAVE_NAMESPACE_REQUEST =
+  SaveNamespaceRequestProto.newBuilder().build();
+
+  private final static RollEditsRequestProto VOID_ROLLEDITS_REQUEST = 
+  RollEditsRequestProto.getDefaultInstance();
+
+  private final static RefreshNodesRequestProto VOID_REFRESH_NODES_REQUEST =
+  RefreshNodesRequestProto.newBuilder().build();
+
+  private final static FinalizeUpgradeRequestProto
+  VOID_FINALIZE_UPGRADE_REQUEST =
+      FinalizeUpgradeRequestProto.newBuilder().build();
+
+  private final static GetDataEncryptionKeyRequestProto
+  VOID_GET_DATA_ENCRYPTIONKEY_REQUEST =
+      GetDataEncryptionKeyRequestProto.newBuilder().build();
+
   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
     rpcProxy = proxy;
     rpcProxy = proxy;
   }
   }
@@ -167,7 +191,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
 
 
   @Override
   @Override
   public FsServerDefaults getServerDefaults() throws IOException {
   public FsServerDefaults getServerDefaults() throws IOException {
-    GetServerDefaultsRequestProto req = GetServerDefaultsRequestProto.newBuilder().build();
+    GetServerDefaultsRequestProto req = VOID_GET_SERVER_DEFAULT_REQUEST;
     try {
     try {
       return PBHelper
       return PBHelper
           .convert(rpcProxy.getServerDefaults(null, req).getServerDefaults());
           .convert(rpcProxy.getServerDefaults(null, req).getServerDefaults());
@@ -480,9 +504,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
 
 
   @Override
   @Override
   public long[] getStats() throws IOException {
   public long[] getStats() throws IOException {
-    GetFsStatusRequestProto req = GetFsStatusRequestProto.newBuilder().build();
     try {
     try {
-      return PBHelper.convert(rpcProxy.getFsStats(null, req));
+      return PBHelper.convert(rpcProxy.getFsStats(null,
+          VOID_GET_FSSTATUS_REQUEST));
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -529,10 +553,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
 
 
   @Override
   @Override
   public void saveNamespace() throws AccessControlException, IOException {
   public void saveNamespace() throws AccessControlException, IOException {
-    SaveNamespaceRequestProto req = SaveNamespaceRequestProto.newBuilder()
-        .build();
     try {
     try {
-      rpcProxy.saveNamespace(null, req);
+      rpcProxy.saveNamespace(null, VOID_SAVE_NAMESPACE_REQUEST);
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -540,9 +562,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
   
   
   @Override
   @Override
   public long rollEdits() throws AccessControlException, IOException {
   public long rollEdits() throws AccessControlException, IOException {
-    RollEditsRequestProto req = RollEditsRequestProto.getDefaultInstance();
     try {
     try {
-      RollEditsResponseProto resp = rpcProxy.rollEdits(null, req);
+      RollEditsResponseProto resp = rpcProxy.rollEdits(null,
+          VOID_ROLLEDITS_REQUEST);
       return resp.getNewSegmentTxId();
       return resp.getNewSegmentTxId();
     } catch (ServiceException se) {
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
       throw ProtobufHelper.getRemoteException(se);
@@ -564,9 +586,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
 
 
   @Override
   @Override
   public void refreshNodes() throws IOException {
   public void refreshNodes() throws IOException {
-    RefreshNodesRequestProto req = RefreshNodesRequestProto.newBuilder().build();
     try {
     try {
-      rpcProxy.refreshNodes(null, req);
+      rpcProxy.refreshNodes(null, VOID_REFRESH_NODES_REQUEST);
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -574,9 +595,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
 
 
   @Override
   @Override
   public void finalizeUpgrade() throws IOException {
   public void finalizeUpgrade() throws IOException {
-    FinalizeUpgradeRequestProto req = FinalizeUpgradeRequestProto.newBuilder().build();
     try {
     try {
-      rpcProxy.finalizeUpgrade(null, req);
+      rpcProxy.finalizeUpgrade(null, VOID_FINALIZE_UPGRADE_REQUEST);
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -722,7 +742,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
     GetLinkTargetRequestProto req = GetLinkTargetRequestProto.newBuilder()
     GetLinkTargetRequestProto req = GetLinkTargetRequestProto.newBuilder()
         .setPath(path).build();
         .setPath(path).build();
     try {
     try {
-      return rpcProxy.getLinkTarget(null, req).getTargetPath();
+      GetLinkTargetResponseProto rsp = rpcProxy.getLinkTarget(null, req);
+      return rsp.hasTargetPath() ? rsp.getTargetPath() : null;
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -783,7 +804,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
         setToken(PBHelper.convert(token)).
         setToken(PBHelper.convert(token)).
         build();
         build();
     try {
     try {
-      return rpcProxy.renewDelegationToken(null, req).getNewExireTime();
+      return rpcProxy.renewDelegationToken(null, req).getNewExpiryTime();
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -824,12 +845,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
   
   
   @Override
   @Override
   public DataEncryptionKey getDataEncryptionKey() throws IOException {
   public DataEncryptionKey getDataEncryptionKey() throws IOException {
-    GetDataEncryptionKeyRequestProto req = GetDataEncryptionKeyRequestProto
-        .newBuilder().build();
     try {
     try {
-      GetDataEncryptionKeyResponseProto rsp = 
-          rpcProxy.getDataEncryptionKey(null, req);
-      return rsp.hasDataEncryptionKey() ? 
+      GetDataEncryptionKeyResponseProto rsp = rpcProxy.getDataEncryptionKey(
+          null, VOID_GET_DATA_ENCRYPTIONKEY_REQUEST);
+     return rsp.hasDataEncryptionKey() ? 
           PBHelper.convert(rsp.getDataEncryptionKey()) : null;
           PBHelper.convert(rsp.getDataEncryptionKey()) : null;
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java

@@ -84,7 +84,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
   
   
   /** RpcController is not used and hence is set to null */
   /** RpcController is not used and hence is set to null */
   private final DatanodeProtocolPB rpcProxy;
   private final DatanodeProtocolPB rpcProxy;
-  private static final VersionRequestProto VERSION_REQUEST = 
+  private static final VersionRequestProto VOID_VERSION_REQUEST = 
       VersionRequestProto.newBuilder().build();
       VersionRequestProto.newBuilder().build();
   private final static RpcController NULL_CONTROLLER = null;
   private final static RpcController NULL_CONTROLLER = null;
   
   
@@ -243,7 +243,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
   public NamespaceInfo versionRequest() throws IOException {
   public NamespaceInfo versionRequest() throws IOException {
     try {
     try {
       return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
       return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
-          VERSION_REQUEST).getInfo());
+          VOID_VERSION_REQUEST).getInfo());
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }

+ 12 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java

@@ -62,15 +62,17 @@ public class DatanodeProtocolServerSideTranslatorPB implements
    DatanodeProtocolPB {

  private final DatanodeProtocol impl;
-  private static final ErrorReportResponseProto ERROR_REPORT_RESPONSE_PROTO = 
-      ErrorReportResponseProto.newBuilder().build();
+  private static final ErrorReportResponseProto
+      VOID_ERROR_REPORT_RESPONSE_PROTO = 
+          ErrorReportResponseProto.newBuilder().build();
  private static final BlockReceivedAndDeletedResponseProto 
-      BLOCK_RECEIVED_AND_DELETE_RESPONSE = 
+      VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE = 
          BlockReceivedAndDeletedResponseProto.newBuilder().build();
-  private static final ReportBadBlocksResponseProto REPORT_BAD_BLOCK_RESPONSE = 
-      ReportBadBlocksResponseProto.newBuilder().build();
+  private static final ReportBadBlocksResponseProto
+      VOID_REPORT_BAD_BLOCK_RESPONSE = 
+          ReportBadBlocksResponseProto.newBuilder().build();
  private static final CommitBlockSynchronizationResponseProto 
-      COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
+      VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
          CommitBlockSynchronizationResponseProto.newBuilder().build();

  public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl) {
@@ -180,7 +182,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return BLOCK_RECEIVED_AND_DELETE_RESPONSE;
+    return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
  }

  @Override
@@ -192,7 +194,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return ERROR_REPORT_RESPONSE_PROTO;
+    return VOID_ERROR_REPORT_RESPONSE_PROTO;
  }

  @Override
@@ -221,7 +223,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return REPORT_BAD_BLOCK_RESPONSE;
+    return VOID_REPORT_BAD_BLOCK_RESPONSE;
  }

  @Override
@@ -242,6 +244,6 @@ public class DatanodeProtocolServerSideTranslatorPB implements
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
+    return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
  }
 }

+ 9 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java

@@ -42,6 +42,13 @@ public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB
  /** Server side implementation to delegate the requests to */
  private final JournalProtocol impl;

+  private final static JournalResponseProto VOID_JOURNAL_RESPONSE = 
+  JournalResponseProto.newBuilder().build();
+
+  private final static StartLogSegmentResponseProto
+  VOID_START_LOG_SEGMENT_RESPONSE =
+      StartLogSegmentResponseProto.newBuilder().build();
+
  public JournalProtocolServerSideTranslatorPB(JournalProtocol impl) {
    this.impl = impl;
  }
@@ -56,7 +63,7 @@ public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return JournalResponseProto.newBuilder().build();
+    return VOID_JOURNAL_RESPONSE;
  }

  /** @see JournalProtocol#startLogSegment */
@@ -69,7 +76,7 @@ public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return StartLogSegmentResponseProto.newBuilder().build();
+    return VOID_START_LOG_SEGMENT_RESPONSE;
  }

  @Override

+ 14 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java

@@ -63,6 +63,12 @@ public class NamenodeProtocolServerSideTranslatorPB implements
    NamenodeProtocolPB {
  private final NamenodeProtocol impl;

+  private final static ErrorReportResponseProto VOID_ERROR_REPORT_RESPONSE = 
+  ErrorReportResponseProto.newBuilder().build();
+
+  private final static EndCheckpointResponseProto VOID_END_CHECKPOINT_RESPONSE =
+  EndCheckpointResponseProto.newBuilder().build();
+
  public NamenodeProtocolServerSideTranslatorPB(NamenodeProtocol impl) {
    this.impl = impl;
  }
@@ -91,8 +97,12 @@ public class NamenodeProtocolServerSideTranslatorPB implements
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return GetBlockKeysResponseProto.newBuilder()
-        .setKeys(PBHelper.convert(keys)).build();
+    GetBlockKeysResponseProto.Builder builder = 
+        GetBlockKeysResponseProto.newBuilder();
+    if (keys != null) {
+      builder.setKeys(PBHelper.convert(keys));
+    }
+    return builder.build();
  }

  @Override
@@ -143,7 +153,7 @@ public class NamenodeProtocolServerSideTranslatorPB implements
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return ErrorReportResponseProto.newBuilder().build();
+    return VOID_ERROR_REPORT_RESPONSE;
  }

  @Override
@@ -181,7 +191,7 @@ public class NamenodeProtocolServerSideTranslatorPB implements
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return EndCheckpointResponseProto.newBuilder().build();
+    return VOID_END_CHECKPOINT_RESPONSE;
  }

  @Override
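The server-side half of making the keys field optional is the conditional setKeys() above: generated protobuf builders throw NullPointerException when handed null, so the field is only set when the implementation actually returned a value. A self-contained sketch with a stand-in builder (not the generated class):

// Hedged sketch of "only set the optional field when there is a value".
public class OptionalFieldBuilderPattern {

  /** Minimal stand-in for a generated builder with one optional field. */
  static final class ResponseBuilder {
    private String keys;                        // unset until setKeys() is called
    ResponseBuilder setKeys(String keys) {
      if (keys == null) {
        throw new NullPointerException();       // matches protobuf builder behaviour
      }
      this.keys = keys;
      return this;
    }
    String build() { return keys == null ? "<no keys>" : keys; }
  }

  static String buildResponse(String keysFromImpl) {
    ResponseBuilder builder = new ResponseBuilder();
    if (keysFromImpl != null) {                 // mirrors the null check in the diff above
      builder.setKeys(keysFromImpl);
    }
    return builder.build();
  }

  public static void main(String[] args) {
    System.out.println(buildResponse(null));    // "<no keys>" instead of an NPE
    System.out.println(buildResponse("abc"));   // "abc"
  }
}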

+ 12 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
@@ -67,13 +68,13 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
  /*
   * Protobuf requests with no parameters instantiated only once
   */
-  private static final GetBlockKeysRequestProto GET_BLOCKKEYS = 
+  private static final GetBlockKeysRequestProto VOID_GET_BLOCKKEYS_REQUEST = 
      GetBlockKeysRequestProto.newBuilder().build();
-  private static final GetTransactionIdRequestProto GET_TRANSACTIONID = 
+  private static final GetTransactionIdRequestProto VOID_GET_TRANSACTIONID_REQUEST = 
      GetTransactionIdRequestProto.newBuilder().build();
-  private static final RollEditLogRequestProto ROLL_EDITLOG = 
+  private static final RollEditLogRequestProto VOID_ROLL_EDITLOG_REQUEST = 
      RollEditLogRequestProto.newBuilder().build();
-  private static final VersionRequestProto VERSION_REQUEST = 
+  private static final VersionRequestProto VOID_VERSION_REQUEST = 
      VersionRequestProto.newBuilder().build();

  final private NamenodeProtocolPB rpcProxy;
@@ -104,8 +105,9 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
  @Override
  public ExportedBlockKeys getBlockKeys() throws IOException {
    try {
-      return PBHelper.convert(rpcProxy.getBlockKeys(NULL_CONTROLLER,
-          GET_BLOCKKEYS).getKeys());
+      GetBlockKeysResponseProto rsp = rpcProxy.getBlockKeys(NULL_CONTROLLER,
+          VOID_GET_BLOCKKEYS_REQUEST);
+      return rsp.hasKeys() ? PBHelper.convert(rsp.getKeys()) : null;
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
@@ -114,8 +116,8 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
  @Override
  public long getTransactionID() throws IOException {
    try {
-      return rpcProxy.getTransactionId(NULL_CONTROLLER, GET_TRANSACTIONID)
-          .getTxId();
+      return rpcProxy.getTransactionId(NULL_CONTROLLER,
+          VOID_GET_TRANSACTIONID_REQUEST).getTxId();
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
@@ -135,7 +137,7 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
  public CheckpointSignature rollEditLog() throws IOException {
    try {
      return PBHelper.convert(rpcProxy.rollEditLog(NULL_CONTROLLER,
-          ROLL_EDITLOG).getSignature());
+          VOID_ROLL_EDITLOG_REQUEST).getSignature());
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
@@ -145,7 +147,7 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
  public NamespaceInfo versionRequest() throws IOException {
    try {
      return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
-          VERSION_REQUEST).getInfo());
+          VOID_VERSION_REQUEST).getInfo());
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }

+ 6 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java

@@ -38,6 +38,10 @@ public class RefreshAuthorizationPolicyProtocolClientSideTranslatorPB implements
  private final static RpcController NULL_CONTROLLER = null;
  private final RefreshAuthorizationPolicyProtocolPB rpcProxy;
  
+  private final static RefreshServiceAclRequestProto
+  VOID_REFRESH_SERVICE_ACL_REQUEST =
+      RefreshServiceAclRequestProto.newBuilder().build();
+
  public RefreshAuthorizationPolicyProtocolClientSideTranslatorPB(
      RefreshAuthorizationPolicyProtocolPB rpcProxy) {
    this.rpcProxy = rpcProxy;
@@ -50,10 +54,9 @@ public class RefreshAuthorizationPolicyProtocolClientSideTranslatorPB implements

  @Override
  public void refreshServiceAcl() throws IOException {
-    RefreshServiceAclRequestProto request = RefreshServiceAclRequestProto
-        .newBuilder().build();
    try {
-      rpcProxy.refreshServiceAcl(NULL_CONTROLLER, request);
+      rpcProxy.refreshServiceAcl(NULL_CONTROLLER,
+          VOID_REFRESH_SERVICE_ACL_REQUEST);
    } catch (ServiceException se) {
      throw ProtobufHelper.getRemoteException(se);
    }

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java

@@ -32,6 +32,10 @@ public class RefreshAuthorizationPolicyProtocolServerSideTranslatorPB implements

  private final RefreshAuthorizationPolicyProtocol impl;

+  private final static RefreshServiceAclResponseProto
+  VOID_REFRESH_SERVICE_ACL_RESPONSE = RefreshServiceAclResponseProto
+      .newBuilder().build();
+
  public RefreshAuthorizationPolicyProtocolServerSideTranslatorPB(
      RefreshAuthorizationPolicyProtocol impl) {
    this.impl = impl;
@@ -46,6 +50,6 @@ public class RefreshAuthorizationPolicyProtocolServerSideTranslatorPB implements
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return RefreshServiceAclResponseProto.newBuilder().build();
+    return VOID_REFRESH_SERVICE_ACL_RESPONSE;
  }
 }

+ 12 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java

@@ -39,6 +39,14 @@ public class RefreshUserMappingsProtocolClientSideTranslatorPB implements
  private final static RpcController NULL_CONTROLLER = null;
  private final RefreshUserMappingsProtocolPB rpcProxy;
  
+  private final static RefreshUserToGroupsMappingsRequestProto 
+  VOID_REFRESH_USER_TO_GROUPS_MAPPING_REQUEST = 
+      RefreshUserToGroupsMappingsRequestProto.newBuilder().build();
+
+  private final static RefreshSuperUserGroupsConfigurationRequestProto
+  VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_REQUEST = 
+      RefreshSuperUserGroupsConfigurationRequestProto.newBuilder().build();
+
  public RefreshUserMappingsProtocolClientSideTranslatorPB(
      RefreshUserMappingsProtocolPB rpcProxy) {
    this.rpcProxy = rpcProxy;
@@ -51,10 +59,9 @@ public class RefreshUserMappingsProtocolClientSideTranslatorPB implements

  @Override
  public void refreshUserToGroupsMappings() throws IOException {
-    RefreshUserToGroupsMappingsRequestProto request = 
-        RefreshUserToGroupsMappingsRequestProto.newBuilder().build();
    try {
-      rpcProxy.refreshUserToGroupsMappings(NULL_CONTROLLER, request);
+      rpcProxy.refreshUserToGroupsMappings(NULL_CONTROLLER,
+          VOID_REFRESH_USER_TO_GROUPS_MAPPING_REQUEST);
    } catch (ServiceException se) {
      throw ProtobufHelper.getRemoteException(se);
    }
@@ -62,10 +69,9 @@ public class RefreshUserMappingsProtocolClientSideTranslatorPB implements

  @Override
  public void refreshSuperUserGroupsConfiguration() throws IOException {
-    RefreshSuperUserGroupsConfigurationRequestProto request = 
-        RefreshSuperUserGroupsConfigurationRequestProto.newBuilder().build();
    try {
-      rpcProxy.refreshSuperUserGroupsConfiguration(NULL_CONTROLLER, request);
+      rpcProxy.refreshSuperUserGroupsConfiguration(NULL_CONTROLLER,
+          VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_REQUEST);
    } catch (ServiceException se) {
      throw ProtobufHelper.getRemoteException(se);
    }

+ 11 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java

@@ -33,6 +33,15 @@ public class RefreshUserMappingsProtocolServerSideTranslatorPB implements Refres

  private final RefreshUserMappingsProtocol impl;
  
+  private final static RefreshUserToGroupsMappingsResponseProto 
+  VOID_REFRESH_USER_GROUPS_MAPPING_RESPONSE =
+      RefreshUserToGroupsMappingsResponseProto.newBuilder().build();
+
+  private final static RefreshSuperUserGroupsConfigurationResponseProto
+  VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_RESPONSE = 
+      RefreshSuperUserGroupsConfigurationResponseProto.newBuilder()
+      .build();
+
  public RefreshUserMappingsProtocolServerSideTranslatorPB(RefreshUserMappingsProtocol impl) {
    this.impl = impl;
  }
@@ -47,7 +56,7 @@ public class RefreshUserMappingsProtocolServerSideTranslatorPB implements Refres
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return RefreshUserToGroupsMappingsResponseProto.newBuilder().build();
+    return VOID_REFRESH_USER_GROUPS_MAPPING_RESPONSE;
  }

  @Override
@@ -60,7 +69,6 @@ public class RefreshUserMappingsProtocolServerSideTranslatorPB implements Refres
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return RefreshSuperUserGroupsConfigurationResponseProto.newBuilder()
-        .build();
+    return VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_RESPONSE;
  }
 }

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolProtocolBuffers/overview.html


+ 9 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java

@@ -65,6 +65,13 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
  /** Server side implementation to delegate the requests to */
  private final QJournalProtocol impl;

+  private final static JournalResponseProto VOID_JOURNAL_RESPONSE =
+  JournalResponseProto.newBuilder().build();
+
+  private final static StartLogSegmentResponseProto
+  VOID_START_LOG_SEGMENT_RESPONSE =
+      StartLogSegmentResponseProto.newBuilder().build();
+
  public QJournalProtocolServerSideTranslatorPB(QJournalProtocol impl) {
    this.impl = impl;
  }
@@ -135,7 +142,7 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return JournalResponseProto.newBuilder().build();
+    return VOID_JOURNAL_RESPONSE;
  }

  /** @see JournalProtocol#heartbeat */
@@ -160,7 +167,7 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
    } catch (IOException e) {
      throw new ServiceException(e);
    }
-    return StartLogSegmentResponseProto.newBuilder().build();
+    return VOID_START_LOG_SEGMENT_RESPONSE;
  }
  
  @Override

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java

@@ -395,7 +395,7 @@ class BPOfferService {
  }

  @VisibleForTesting
-  synchronized List<BPServiceActor> getBPServiceActors() {
+  List<BPServiceActor> getBPServiceActors() {
    return Lists.newArrayList(bpServices);
  }
  
   

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java

@@ -388,8 +388,8 @@ class BlockPoolSliceScanner {
      try {
        adjustThrottler();
        
-        blockSender = new BlockSender(block, 0, -1, false, true, datanode,
-            null);
+        blockSender = new BlockSender(block, 0, -1, false, true, true, 
+            datanode, null);

        DataOutputStream out = 
                new DataOutputStream(new IOUtils.NullOutputStream());

+ 41 - 22
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

@@ -45,6 +45,8 @@ import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.net.SocketOutputStream;
 import org.apache.hadoop.util.DataChecksum;

+import com.google.common.base.Preconditions;
+
 /**
  * Reads a block from the disk and sends it to a recipient.
  * 
@@ -158,12 +160,14 @@ class BlockSender implements java.io.Closeable {
   * @param length length of data to read
   * @param corruptChecksumOk
   * @param verifyChecksum verify checksum while reading the data
+   * @param sendChecksum send checksum to client.
   * @param datanode datanode from which the block is being read
   * @param clientTraceFmt format string used to print client trace logs
   * @throws IOException
   */
  BlockSender(ExtendedBlock block, long startOffset, long length,
              boolean corruptChecksumOk, boolean verifyChecksum,
+              boolean sendChecksum,
              DataNode datanode, String clientTraceFmt)
      throws IOException {
    try {
@@ -175,6 +179,13 @@ class BlockSender implements java.io.Closeable {
      this.shouldDropCacheBehindRead = datanode.getDnConf().dropCacheBehindReads;
      this.datanode = datanode;
      
+      if (verifyChecksum) {
+        // To simplify implementation, callers may not specify verification
+        // without sending.
+        Preconditions.checkArgument(sendChecksum,
+            "If verifying checksum, currently must also send it.");
+      }
+      
      final Replica replica;
      final long replicaVisibleLength;
      synchronized(datanode.data) { 
@@ -213,29 +224,37 @@ class BlockSender implements java.io.Closeable {
       * False,  True: will verify checksum
       * False, False: throws IOException file not found
       */
-      DataChecksum csum;
-      final InputStream metaIn = datanode.data.getMetaDataInputStream(block);
-      if (!corruptChecksumOk || metaIn != null) {
-      	if (metaIn == null) {
-          //need checksum but meta-data not found
-          throw new FileNotFoundException("Meta-data not found for " + block);
-        } 
-      	
-        checksumIn = new DataInputStream(
-            new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
-
-        // read and handle the common header here. For now just a version
-        BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
-        short version = header.getVersion();
-        if (version != BlockMetadataHeader.VERSION) {
-          LOG.warn("Wrong version (" + version + ") for metadata file for "
-              + block + " ignoring ...");
+      DataChecksum csum = null;
+      if (verifyChecksum || sendChecksum) {
+        final InputStream metaIn = datanode.data.getMetaDataInputStream(block);
+        if (!corruptChecksumOk || metaIn != null) {
+          if (metaIn == null) {
+            //need checksum but meta-data not found
+            throw new FileNotFoundException("Meta-data not found for " + block);
+          }
+
+          checksumIn = new DataInputStream(
+              new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
+  
+          // read and handle the common header here. For now just a version
+          BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
+          short version = header.getVersion();
+          if (version != BlockMetadataHeader.VERSION) {
+            LOG.warn("Wrong version (" + version + ") for metadata file for "
+                + block + " ignoring ...");
+          }
+          csum = header.getChecksum();
+        } else {
+          LOG.warn("Could not find metadata file for " + block);
        }
-        csum = header.getChecksum();
-      } else {
-        LOG.warn("Could not find metadata file for " + block);
-        // This only decides the buffer size. Use BUFFER_SIZE?
-        csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 16 * 1024);
+      }
+      if (csum == null) {
+        // The number of bytes per checksum here determines the alignment
+        // of reads: we always start reading at a checksum chunk boundary,
+        // even if the checksum type is NULL. So, choosing too big of a value
+        // would risk sending too much unnecessary data. 512 (1 disk sector)
+        // is likely to result in minimal extra IO.
+        csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512);
      }

      /*
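The new BlockSender logic only opens the block's metadata file when a checksum is actually needed, and otherwise falls back to a NULL checksum whose sole job is to fix read alignment (512 bytes, one disk sector). A self-contained sketch of that decision, using stand-in types rather than Hadoop's DataChecksum:

// Hedged sketch of the checksum-selection decision made above.
public class ChecksumSelectionSketch {

  static final class ChecksumChoice {
    final String type;        // e.g. "CRC32C" read from metadata, or "NULL"
    final int bytesPerChunk;  // alignment unit for reads
    ChecksumChoice(String type, int bytesPerChunk) {
      this.type = type;
      this.bytesPerChunk = bytesPerChunk;
    }
  }

  static ChecksumChoice choose(boolean verifyChecksum, boolean sendChecksum,
                               ChecksumChoice fromMetadata) {
    if (verifyChecksum && !sendChecksum) {
      // mirrors the Preconditions.checkArgument() in the diff above
      throw new IllegalArgumentException("If verifying checksum, currently must also send it.");
    }
    ChecksumChoice csum = null;
    if (verifyChecksum || sendChecksum) {
      csum = fromMetadata;                    // only then is the .meta file consulted
    }
    if (csum == null) {
      csum = new ChecksumChoice("NULL", 512); // small chunk => little wasted read-ahead
    }
    return csum;
  }

  /** Reads still start on a chunk boundary, even with a NULL checksum. */
  static long alignedStart(long offset, ChecksumChoice csum) {
    return offset - (offset % csum.bytesPerChunk);
  }

  public static void main(String[] args) {
    ChecksumChoice c = choose(false, false, null);
    System.out.println(c.type + "/" + c.bytesPerChunk
        + ", aligned start for offset 1000 = " + alignedStart(1000, c)); // NULL/512, 512
  }
}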

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -1441,7 +1441,7 @@ public class DataNode extends Configured
            HdfsConstants.SMALL_BUFFER_SIZE));
        in = new DataInputStream(unbufIn);
        blockSender = new BlockSender(b, 0, b.getNumBytes(), 
-            false, false, DataNode.this, null);
+            false, false, true, DataNode.this, null);
        DatanodeInfo srcNode = new DatanodeInfo(bpReg);

        //

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -241,7 +241,8 @@ class DataXceiver extends Receiver implements Runnable {
      final Token<BlockTokenIdentifier> blockToken,
      final String clientName,
      final long blockOffset,
-      final long length) throws IOException {
+      final long length,
+      final boolean sendChecksum) throws IOException {
    previousOpClientName = clientName;

    OutputStream baseStream = getOutputStream();
@@ -266,7 +267,7 @@ class DataXceiver extends Receiver implements Runnable {
    try {
      try {
        blockSender = new BlockSender(block, blockOffset, length,
-            true, false, datanode, clientTraceFmt);
+            true, false, sendChecksum, datanode, clientTraceFmt);
      } catch(IOException e) {
        String msg = "opReadBlock " + block + " received exception " + e; 
        LOG.info(msg);
@@ -654,7 +655,7 @@ class DataXceiver extends Receiver implements Runnable {

    try {
      // check if the block exists or not
-      blockSender = new BlockSender(block, 0, -1, false, false, datanode, 
+      blockSender = new BlockSender(block, 0, -1, false, false, true, datanode, 
          null);

      // set up response stream

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java

@@ -1,3 +1,20 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;

+ 1 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java

@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.io.PrintWriter;
 import java.net.URL;
 
-import javax.net.SocketFactory;
 import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
@@ -33,14 +32,11 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ServletUtil;
 import org.znerd.xmlenc.XMLOutputter;
@@ -116,18 +112,11 @@ public class FileChecksumServlets {
      final DataNode datanode = (DataNode) context.getAttribute("datanode");
      final Configuration conf = 
        new HdfsConfiguration(datanode.getConf());
-      final int socketTimeout = conf.getInt(
-          DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
-          HdfsServerConstants.READ_TIMEOUT);
-      final SocketFactory socketFactory = NetUtils.getSocketFactory(conf,
-          ClientProtocol.class);
      
      try {
        final DFSClient dfs = DatanodeJspHelper.getDFSClient(request, 
            datanode, conf, getUGI(request, conf));
-        final ClientProtocol nnproxy = dfs.getNamenode();
-        final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
-            path, nnproxy, socketFactory, socketTimeout, dfs.getDataEncryptionKey(), false);
+        final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path);
        MD5MD5CRC32FileChecksum.write(xml, checksum);
      } catch(IOException ioe) {
        writeXml(ioe, path, xml);

+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -281,6 +281,17 @@ public class SecondaryNameNode implements Runnable {
    LOG.info("Log Size Trigger    :" + checkpointConf.getTxnCount() + " txns");
  }

+  /**
+   * Wait for the service to finish.
+   * (Normally, it runs forever.)
+   */
+  private void join() {
+    try {
+      infoServer.join();
+    } catch (InterruptedException ie) {
+    }
+  }
+
  /**
   * Shut down this instance of the datanode.
   * Returns only after shutdown is complete.
@@ -607,6 +618,7 @@ public class SecondaryNameNode implements Runnable {

    if (secondary != null) {
      secondary.startCheckpointThread();
+      secondary.join();
    }
  }
  

+ 8 - 31
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto

@@ -168,7 +168,7 @@ message RenameRequestProto {
  required string dst = 2;
 }

-message RenameResponseProto { // void response
+message RenameResponseProto {
  required bool result = 1;
 }

@@ -393,7 +393,7 @@ message GetLinkTargetRequestProto {
  required string path = 1;
 }
 message GetLinkTargetResponseProto {
-  required string targetPath = 1;
+  optional string targetPath = 1;
 }

 message UpdateBlockForPipelineRequestProto {
@@ -415,29 +415,6 @@ message UpdatePipelineRequestProto {
 message UpdatePipelineResponseProto { // void response
 }

-message GetDelegationTokenRequestProto {
-  required string renewer = 1;
-}
-
-message GetDelegationTokenResponseProto {
-  optional hadoop.common.TokenProto token = 1;
-}
-
-message RenewDelegationTokenRequestProto {
-  required hadoop.common.TokenProto token = 1;
-}
-
-message RenewDelegationTokenResponseProto {
-  required uint64 newExireTime = 1;
-}
-
-message CancelDelegationTokenRequestProto {
-  required hadoop.common.TokenProto token = 1;
-}
-
-message CancelDelegationTokenResponseProto { // void response
-}
-
 message SetBalancerBandwidthRequestProto {
  required int64 bandwidth = 1;
 }
@@ -554,12 +531,12 @@ service ClientNamenodeProtocol {
      returns(UpdateBlockForPipelineResponseProto);
  rpc updatePipeline(UpdatePipelineRequestProto)
      returns(UpdatePipelineResponseProto);
-  rpc getDelegationToken(GetDelegationTokenRequestProto)
-      returns(GetDelegationTokenResponseProto);
-  rpc renewDelegationToken(RenewDelegationTokenRequestProto)
-      returns(RenewDelegationTokenResponseProto);
-  rpc cancelDelegationToken(CancelDelegationTokenRequestProto)
-      returns(CancelDelegationTokenResponseProto);
+  rpc getDelegationToken(hadoop.common.GetDelegationTokenRequestProto)
+      returns(hadoop.common.GetDelegationTokenResponseProto);
+  rpc renewDelegationToken(hadoop.common.RenewDelegationTokenRequestProto)
+      returns(hadoop.common.RenewDelegationTokenResponseProto);
+  rpc cancelDelegationToken(hadoop.common.CancelDelegationTokenRequestProto)
+      returns(hadoop.common.CancelDelegationTokenResponseProto);
  rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto)
      returns(SetBalancerBandwidthResponseProto);
  rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto)

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto

@@ -56,7 +56,7 @@ message GetBlockKeysRequestProto {
 * keys - Information about block keys at the active namenode
 */
 message GetBlockKeysResponseProto {
-  required ExportedBlockKeysProto keys = 1;
+  optional ExportedBlockKeysProto keys = 1;
 }

 /**

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto

@@ -52,6 +52,7 @@ message OpReadBlockProto {
  required ClientOperationHeaderProto header = 1;
  required uint64 offset = 2;
  required uint64 len = 3;
+  optional bool sendChecksums = 4 [default = true];
 }


@@ -182,5 +183,5 @@ message OpBlockChecksumResponseProto {
  required uint32 bytesPerCrc = 1;
  required uint64 crcPerBlock = 2;
  required bytes md5 = 3;
-  optional ChecksumTypeProto crcType = 4 [default = CHECKSUM_CRC32];
+  optional ChecksumTypeProto crcType = 4;
 }
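The new sendChecksums field is optional with a default of true, which is what keeps the wire format compatible: a peer that never writes the field still reads back the default, so existing clients keep receiving checksums while new clients may explicitly opt out. A self-contained sketch of that behaviour, using a stand-in for the generated accessors:

// Hedged sketch of why "optional bool sendChecksums = 4 [default = true]" is safe to add.
public class OptionalWithDefaultSketch {

  /** Stand-in for the generated OpReadBlockProto accessors for field 4. */
  static final class OpReadBlock {
    private final Boolean sendChecksums;        // null => field absent on the wire
    OpReadBlock(Boolean sendChecksums) { this.sendChecksums = sendChecksums; }
    boolean hasSendChecksums() { return sendChecksums != null; }
    boolean getSendChecksums() {
      return sendChecksums != null ? sendChecksums : true;  // [default = true]
    }
  }

  public static void main(String[] args) {
    OpReadBlock fromOldClient = new OpReadBlock(null);    // field never set
    OpReadBlock fromNewClient = new OpReadBlock(false);   // explicitly opted out
    System.out.println(fromOldClient.getSendChecksums()); // true: old behaviour preserved
    System.out.println(fromNewClient.getSendChecksums()); // false: checksums skipped
  }
}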

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier

@@ -1,2 +1,15 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
 org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier
 org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer

@@ -1,3 +1,16 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
 org.apache.hadoop.hdfs.DFSClient$Renewer
 org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier$Renewer
 org.apache.hadoop.hdfs.HftpFileSystem$TokenManager

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj


+ 1 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -48,7 +48,6 @@ import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
-import java.net.ServerSocket;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.channels.FileChannel;
@@ -2290,19 +2289,6 @@ public class MiniDFSCluster {
    return nameNodes[nnIndex].nameNode;
  }
  
-  private int getFreeSocketPort() {
-    int port = 0;
-    try {
-      ServerSocket s = new ServerSocket(0);
-      port = s.getLocalPort();
-      s.close();
-      return port;
-    } catch (IOException e) {
-      // Could not get a free port. Return default port 0.
-    }
-    return port;
-  }
-  
  protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
                           boolean checkDataNodeAddrConfig) throws IOException {
    if (setupHostsFile) {
@@ -2311,7 +2297,7 @@ public class MiniDFSCluster {
        throw new IOException("Parameter dfs.hosts is not setup in conf");
      }
      // Setup datanode in the include file, if it is defined in the conf
-      String address = "127.0.0.1:" + getFreeSocketPort();
+      String address = "127.0.0.1:" + NetUtils.getFreeSocketPort();
      if (checkDataNodeAddrConfig) {
        conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address);
      } else {
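The private helper is replaced by the shared NetUtils.getFreeSocketPort() added to hadoop-common in this merge. Based on the removed code above, such a helper simply binds an ephemeral ServerSocket, records the port the kernel handed out, and releases it; a minimal standalone sketch:

// Hedged sketch of a "free socket port" helper, modelled on the removed MiniDFSCluster code.
import java.io.IOException;
import java.net.ServerSocket;

public class FreePortSketch {
  static int getFreeSocketPort() {
    int port = 0;
    try (ServerSocket s = new ServerSocket(0)) { // port 0 = "any free port"
      port = s.getLocalPort();                   // remember what the kernel picked
    } catch (IOException e) {
      // could not get a free port; return 0 and let the caller bind ephemerally itself
    }
    return port;
  }

  public static void main(String[] args) {
    System.out.println("picked port " + getFreeSocketPort());
  }
}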

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java

@@ -444,21 +444,21 @@ public class TestDataTransferProtocol {
    recvBuf.reset();
    blk.setBlockId(blkid-1);
    sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        0L, fileLen);
+        0L, fileLen, true);
    sendRecvData("Wrong block ID " + newBlockId + " for read", false); 

    // negative block start offset -1L
    sendBuf.reset();
    blk.setBlockId(blkid);
    sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        -1L, fileLen);
+        -1L, fileLen, true);
    sendRecvData("Negative start-offset for read for block " + 
                 firstBlock.getBlockId(), false);

    // bad block start offset
    sendBuf.reset();
    sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        fileLen, fileLen);
+        fileLen, fileLen, true);
    sendRecvData("Wrong start-offset for reading block " +
                 firstBlock.getBlockId(), false);
    
@@ -475,7 +475,7 @@ public class TestDataTransferProtocol {
    
    sendBuf.reset();
    sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        0L, -1L-random.nextInt(oneMil));
+        0L, -1L-random.nextInt(oneMil), true);
    sendRecvData("Negative length for reading block " +
                 firstBlock.getBlockId(), false);
    
@@ -488,14 +488,14 @@ public class TestDataTransferProtocol {
        recvOut);
    sendBuf.reset();
    sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        0L, fileLen+1);
+        0L, fileLen+1, true);
    sendRecvData("Wrong length for reading block " +
                 firstBlock.getBlockId(), false);
    
    //At the end of all this, read the file to make sure that succeeds finally.
    sendBuf.reset();
    sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        0L, fileLen);
+        0L, fileLen, true);
    readFile(fileSys, file, fileLen);
    } finally {
      cluster.shutdown();

+ 10 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java

@@ -19,6 +19,9 @@ package org.apache.hadoop.hdfs;

 import java.io.IOException;

+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.log4j.Level;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -56,4 +59,11 @@ public class TestParallelRead extends TestParallelReadUtil {
  public void testParallelReadMixed() throws IOException {
    runTestWorkload(new MixedWorkloadHelper());
  }
+  
+  @Test
+  public void testParallelNoChecksums() throws IOException {
+    verifyChecksums = false;
+    runTestWorkload(new MixedWorkloadHelper());
+  }
+
 }

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java

@@ -46,6 +46,7 @@ public class TestParallelReadUtil {
  static final int FILE_SIZE_K = 256;
  static Random rand = null;
  static final int DEFAULT_REPLICATION_FACTOR = 2;
+  protected boolean verifyChecksums = true;

  static {
    // The client-trace log ends up causing a lot of blocking threads
@@ -317,7 +318,8 @@ public class TestParallelReadUtil {

      testInfo.filepath = new Path("/TestParallelRead.dat." + i);
      testInfo.authenticData = util.writeFile(testInfo.filepath, FILE_SIZE_K);
-      testInfo.dis = dfsClient.open(testInfo.filepath.toString());
+      testInfo.dis = dfsClient.open(testInfo.filepath.toString(),
+          dfsClient.dfsClientConf.ioBufferSize, verifyChecksums);

      for (int j = 0; j < nWorkerEach; ++j) {
        workers[nWorkers++] = new ReadWorker(testInfo, nWorkers, helper);

+ 15 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java

@@ -24,11 +24,14 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.Random;

+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.log4j.Level;
 import org.junit.Test;

 /**
@@ -194,11 +197,19 @@ public class TestPread {
   */
  @Test
  public void testPreadDFS() throws IOException {
-    dfsPreadTest(false); //normal pread
-    dfsPreadTest(true); //trigger read code path without transferTo.
+    dfsPreadTest(false, true); //normal pread
+    dfsPreadTest(true, true); //trigger read code path without transferTo.
  }
  
-  private void dfsPreadTest(boolean disableTransferTo) throws IOException {
+  @Test
+  public void testPreadDFSNoChecksum() throws IOException {
+    ((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL);
+    dfsPreadTest(false, false);
+    dfsPreadTest(true, false);
+  }
+  
+  private void dfsPreadTest(boolean disableTransferTo, boolean verifyChecksum)
+      throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
    conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
@@ -210,6 +221,7 @@ public class TestPread {
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fileSys = cluster.getFileSystem();
+    fileSys.setVerifyChecksum(verifyChecksum);
    try {
      Path file1 = new Path("preadtest.dat");
      writeFile(fileSys, file1);
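The knob the new test exercises is the existing FileSystem.setVerifyChecksum(false): with it, the client skips checksum verification, and with the protocol change above the datanode can skip sending checksums entirely. A hedged usage sketch outside the test harness (the path and buffer size are illustrative only):

// Hedged sketch: reading a file with checksum verification disabled.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NoChecksumReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    fs.setVerifyChecksum(false);               // trade integrity checking for read speed
    byte[] buf = new byte[4096];
    try (FSDataInputStream in = fs.open(new Path("/tmp/preadtest.dat"))) {
      int n = in.read(0L, buf, 0, buf.length); // positional read, no checksum verification
      System.out.println("read " + n + " bytes without verifying checksums");
    }
  }
}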

+ 18 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java

@@ -1,3 +1,20 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 package org.apache.hadoop.tools;

 import java.io.IOException;
@@ -37,4 +54,4 @@ public class FakeRenewer extends TokenRenewer {
    lastRenewed = null;
    lastCanceled = null;
  }
-}
+}

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer

@@ -1 +1,14 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
 org.apache.hadoop.tools.FakeRenewer

+ 0 - 3
hadoop-hdfs-project/pom.xml

@@ -48,9 +48,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
        <groupId>org.apache.rat</groupId>
        <artifactId>apache-rat-plugin</artifactId>
        <configuration>
-          <includes>
-            <include>pom.xml</include>
-          </includes>
        </configuration>
      </plugin>
    </plugins>

+ 45 - 4
hadoop-mapreduce-project/CHANGES.txt

@@ -19,6 +19,8 @@ Trunk (Unreleased)
     MAPREDUCE-4887. Add RehashPartitioner, to smooth distributions
     MAPREDUCE-4887. Add RehashPartitioner, to smooth distributions
     with poor implementations of Object#hashCode().  (Radim Kolar via cutting)
     with poor implementations of Object#hashCode().  (Radim Kolar via cutting)
 
 
+    MAPREDUCE-4808. Refactor MapOutput and MergeManager to facilitate reuse by Shuffle implementations. (masokan via tucu)
+
   IMPROVEMENTS
   IMPROVEMENTS
 
 
     MAPREDUCE-3787. [Gridmix] Optimize job monitoring and STRESS mode for
     MAPREDUCE-3787. [Gridmix] Optimize job monitoring and STRESS mode for
@@ -151,9 +153,6 @@ Trunk (Unreleased)
 
     MAPREDUCE-3223. Remove MR1 configs from mapred-default.xml (tlipcon via harsh)
 
-    MAPREDUCE-4678. Running the Pentomino example with defaults throws
-    java.lang.NegativeArraySizeException (Chris McConnell via harsh)
-
     MAPREDUCE-4695. Fix LocalRunner on trunk after MAPREDUCE-3223 broke it
     (harsh)
 
@@ -170,6 +169,9 @@ Release 2.0.3-alpha - Unreleased
     MAPREDUCE-4123. Remove the 'mapred groups' command, which is no longer
     supported. (Devaraj K via sseth)
 
+    MAPREDUCE-4938. Use token request messages defined in hadoop common.
+    (suresh)
+
   NEW FEATURES
 
     MAPREDUCE-4520. Added support for MapReduce applications to request for
@@ -207,6 +209,8 @@ Release 2.0.3-alpha - Unreleased
     MAPREDUCE-4907. TrackerDistributedCacheManager issues too many getFileStatus
     calls. (sandyr via tucu)
 
+    MAPREDUCE-4949. Enable multiple pi jobs to run in parallel. (sandyr via tucu)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -253,6 +257,17 @@ Release 2.0.3-alpha - Unreleased
     MAPREDUCE-1700. User supplied dependencies may conflict with MapReduce
     system JARs. (tomwhite)
 
+    MAPREDUCE-4936. JobImpl uber checks for cpu are wrong (Arun C Murthy via
+    jlowe)
+
+    MAPREDUCE-4924. flakey test: org.apache.hadoop.mapred.TestClusterMRNotification.testMR. 
+    (rkanter via tucu)
+
+    MAPREDUCE-4923. Add toString method to TaggedInputSplit. (sandyr via tucu)
+
+    MAPREDUCE-4948. Fix a failing unit test TestYARNRunner.testHistoryServerToken.
+    (Junping Du via sseth)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES
@@ -629,6 +644,24 @@ Release 2.0.0-alpha - 05-23-2012
     MAPREDUCE-4444. nodemanager fails to start when one of the local-dirs is
     bad (Jason Lowe via bobby)
 
+Release 0.23.7 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+    MAPREDUCE-4946. Fix a performance problem for large jobs by reducing the
+    number of map completion event type conversions. (Jason Lowe via sseth)
+
+  BUG FIXES
+
+    MAPREDUCE-4458. Warn if java.library.path is used for AM or Task
+    (Robert Parker via jeagles)
+
 Release 0.23.6 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -694,7 +727,15 @@ Release 0.23.6 - UNRELEASED
     MAPREDUCE-4921. JobClient should acquire HS token with RM principal 
     (daryn via bobby)
 
-Release 0.23.5 - UNRELEASED
+    MAPREDUCE-4934. Maven RAT plugin is not checking all source files (tgraves)
+
+    MAPREDUCE-4678. Running the Pentomino example with defaults throws
+    java.lang.NegativeArraySizeException (Chris McConnell via harsh)
+
+    MAPREDUCE-4925. The pentomino option parser may be buggy.
+    (Karthik Kambatla via harsh)
+
+Release 0.23.5 - 2012-11-28
 
   INCOMPATIBLE CHANGES
 

+ 13 - 0
hadoop-mapreduce-project/conf/mapred-site.xml.template

@@ -1,5 +1,18 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
 
 <!-- Put site-specific property overrides in this file. -->
 

+ 1 - 1
hadoop-mapreduce-project/dev-support/findbugs-exclude.xml

@@ -268,7 +268,7 @@
      This class is unlikely to get subclassed, so ignore
     -->
      <Match>
-       <Class name="org.apache.hadoop.mapreduce.task.reduce.MergeManager" />
+       <Class name="org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl" />
        <Bug pattern="SC_START_IN_CTOR" />
      </Match>
 

+ 2 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java

@@ -275,14 +275,13 @@ public class TaskAttemptListenerImpl extends CompositeService
     boolean shouldReset = false;
     org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
       TypeConverter.toYarn(taskAttemptID);
-    org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent[] events =
+    TaskCompletionEvent[] events =
         context.getJob(attemptID.getTaskId().getJobId()).getMapAttemptCompletionEvents(
             startIndex, maxEvents);
 
     taskHeartbeatHandler.progressing(attemptID);
     
-    return new MapTaskCompletionEventsUpdate(
-        TypeConverter.fromYarn(events), shouldReset);
+    return new MapTaskCompletionEventsUpdate(events, shouldReset);
   }
 
   @Override

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java

@@ -125,8 +125,8 @@ public class MRClientService extends AbstractService
               .getenv(ApplicationConstants.APPLICATION_CLIENT_SECRET_ENV_NAME);
       byte[] bytes = Base64.decodeBase64(secretKeyStr);
       secretManager =
-          new ClientToAMTokenSecretManager(this.appContext.getApplicationID(),
-            bytes);
+          new ClientToAMTokenSecretManager(
+            this.appContext.getApplicationAttemptId(), bytes);
     }
     server =
         rpc.getServer(MRClientProtocol.class, protocolHandler, address,

+ 2 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java

@@ -24,6 +24,7 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.TaskCompletionEvent;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
@@ -88,7 +89,7 @@ public interface Job {
   TaskAttemptCompletionEvent[]
       getTaskAttemptCompletionEvents(int fromEventId, int maxEvents);
 
-  TaskAttemptCompletionEvent[]
+  TaskCompletionEvent[]
       getMapAttemptCompletionEvents(int startIndex, int maxEvents);
 
   /**

+ 56 - 23
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobACLsManager;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.TaskCompletionEvent;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.JobContext;
@@ -130,6 +131,9 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   private static final TaskAttemptCompletionEvent[]
     EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS = new TaskAttemptCompletionEvent[0];
 
+  private static final TaskCompletionEvent[]
+    EMPTY_TASK_COMPLETION_EVENTS = new TaskCompletionEvent[0];
+
   private static final Log LOG = LogFactory.getLog(JobImpl.class);
 
   //The maximum fraction of fetch failures allowed for a map
@@ -196,7 +200,8 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   private int allowedMapFailuresPercent = 0;
   private int allowedReduceFailuresPercent = 0;
   private List<TaskAttemptCompletionEvent> taskAttemptCompletionEvents;
-  private List<TaskAttemptCompletionEvent> mapAttemptCompletionEvents;
+  private List<TaskCompletionEvent> mapAttemptCompletionEvents;
+  private List<Integer> taskCompletionIdxToMapCompletionIdx;
   private final List<String> diagnostics = new ArrayList<String>();
   
   //task/attempt related datastructures
@@ -684,27 +689,31 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   @Override
   public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
       int fromEventId, int maxEvents) {
-    return getAttemptCompletionEvents(taskAttemptCompletionEvents,
-        fromEventId, maxEvents);
+    TaskAttemptCompletionEvent[] events = EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS;
+    readLock.lock();
+    try {
+      if (taskAttemptCompletionEvents.size() > fromEventId) {
+        int actualMax = Math.min(maxEvents,
+            (taskAttemptCompletionEvents.size() - fromEventId));
+        events = taskAttemptCompletionEvents.subList(fromEventId,
+            actualMax + fromEventId).toArray(events);
+      }
+      return events;
+    } finally {
+      readLock.unlock();
+    }
   }
 
   @Override
-  public TaskAttemptCompletionEvent[] getMapAttemptCompletionEvents(
+  public TaskCompletionEvent[] getMapAttemptCompletionEvents(
       int startIndex, int maxEvents) {
-    return getAttemptCompletionEvents(mapAttemptCompletionEvents,
-        startIndex, maxEvents);
-  }
-
-  private TaskAttemptCompletionEvent[] getAttemptCompletionEvents(
-      List<TaskAttemptCompletionEvent> eventList,
-      int startIndex, int maxEvents) {
-    TaskAttemptCompletionEvent[] events = EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS;
+    TaskCompletionEvent[] events = EMPTY_TASK_COMPLETION_EVENTS;
     readLock.lock();
     try {
-      if (eventList.size() > startIndex) {
+      if (mapAttemptCompletionEvents.size() > startIndex) {
         int actualMax = Math.min(maxEvents,
-            (eventList.size() - startIndex));
-        events = eventList.subList(startIndex,
+            (mapAttemptCompletionEvents.size() - startIndex));
+        events = mapAttemptCompletionEvents.subList(startIndex,
             actualMax + startIndex).toArray(events);
       }
       return events;
@@ -1068,9 +1077,13 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
     boolean smallCpu =
         (
             Math.max(
-                conf.getInt(MRJobConfig.MAP_CPU_VCORES, 1), 
-                conf.getInt(MRJobConfig.REDUCE_CPU_VCORES, 1)) < 
-             sysCPUSizeForUberSlot
+                conf.getInt(
+                    MRJobConfig.MAP_CPU_VCORES, 
+                    MRJobConfig.DEFAULT_MAP_CPU_VCORES), 
+                conf.getInt(
+                    MRJobConfig.REDUCE_CPU_VCORES, 
+                    MRJobConfig.DEFAULT_REDUCE_CPU_VCORES)) 
+             <= sysCPUSizeForUberSlot
         );
     boolean notChainJob = !isChainJob(conf);
 
@@ -1243,7 +1256,9 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
             new ArrayList<TaskAttemptCompletionEvent>(
                 job.numMapTasks + job.numReduceTasks + 10);
         job.mapAttemptCompletionEvents =
-            new ArrayList<TaskAttemptCompletionEvent>(job.numMapTasks + 10);
+            new ArrayList<TaskCompletionEvent>(job.numMapTasks + 10);
+        job.taskCompletionIdxToMapCompletionIdx = new ArrayList<Integer>(
+            job.numMapTasks + job.numReduceTasks + 10);
 
         job.allowedMapFailuresPercent =
             job.conf.getInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 0);
@@ -1558,19 +1573,37 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
       //eventId is equal to index in the arraylist
       tce.setEventId(job.taskAttemptCompletionEvents.size());
       job.taskAttemptCompletionEvents.add(tce);
+      int mapEventIdx = -1;
       if (TaskType.MAP.equals(tce.getAttemptId().getTaskId().getTaskType())) {
-        job.mapAttemptCompletionEvents.add(tce);
+        // we track map completions separately from task completions because
+        // - getMapAttemptCompletionEvents uses index ranges specific to maps
+        // - type converting the same events over and over is expensive
+        mapEventIdx = job.mapAttemptCompletionEvents.size();
+        job.mapAttemptCompletionEvents.add(TypeConverter.fromYarn(tce));
       }
+      job.taskCompletionIdxToMapCompletionIdx.add(mapEventIdx);
       
       TaskAttemptId attemptId = tce.getAttemptId();
       TaskId taskId = attemptId.getTaskId();
       //make the previous completion event as obsolete if it exists
-      Object successEventNo = 
-        job.successAttemptCompletionEventNoMap.remove(taskId);
+      Integer successEventNo =
+          job.successAttemptCompletionEventNoMap.remove(taskId);
       if (successEventNo != null) {
         TaskAttemptCompletionEvent successEvent = 
-          job.taskAttemptCompletionEvents.get((Integer) successEventNo);
+          job.taskAttemptCompletionEvents.get(successEventNo);
         successEvent.setStatus(TaskAttemptCompletionEventStatus.OBSOLETE);
+        int mapCompletionIdx =
+            job.taskCompletionIdxToMapCompletionIdx.get(successEventNo);
+        if (mapCompletionIdx >= 0) {
+          // update the corresponding TaskCompletionEvent for the map
+          TaskCompletionEvent mapEvent =
+              job.mapAttemptCompletionEvents.get(mapCompletionIdx);
+          job.mapAttemptCompletionEvents.set(mapCompletionIdx,
+              new TaskCompletionEvent(mapEvent.getEventId(),
+                  mapEvent.getTaskAttemptId(), mapEvent.idWithinJob(),
+                  mapEvent.isMapTask(), TaskCompletionEvent.Status.OBSOLETE,
+                  mapEvent.getTaskTrackerHttp()));
+        }
       }
       
       // if this attempt is not successful then why is the previous successful 
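
The hunk above keeps two views of the same completion events: the full TaskAttemptCompletionEvent list and a pre-converted, map-only TaskCompletionEvent list, joined by taskCompletionIdxToMapCompletionIdx so a superseded map attempt can be marked OBSOLETE in both views without re-converting events on every read. The following self-contained Java sketch mirrors that bookkeeping with made-up types (nothing below is a Hadoop class); it is only meant to make the index-mapping idea concrete.

import java.util.ArrayList;
import java.util.List;

public class CompletionEventBook {

  enum Status { SUCCEEDED, OBSOLETE }

  static final class Event {
    final String attemptId;
    final boolean isMap;
    Status status = Status.SUCCEEDED;

    Event(String attemptId, boolean isMap) {
      this.attemptId = attemptId;
      this.isMap = isMap;
    }
  }

  // Every event, in arrival order; an event's id is its index here.
  private final List<Event> allEvents = new ArrayList<>();
  // "Converted" map-only view (plain strings here), built once per map event
  // so repeated reads never redo the conversion work.
  private final List<String> mapEventView = new ArrayList<>();
  // allEvents index -> mapEventView index, or -1 for non-map events.
  private final List<Integer> allIdxToMapIdx = new ArrayList<>();

  // Record an event; map events also get a converted entry and an index mapping.
  public int add(Event e) {
    int eventId = allEvents.size();
    allEvents.add(e);
    int mapIdx = -1;
    if (e.isMap) {
      mapIdx = mapEventView.size();
      mapEventView.add(render(e));
    }
    allIdxToMapIdx.add(mapIdx);
    return eventId;
  }

  // Mark an earlier event obsolete and refresh its converted map entry, if any.
  public void markObsolete(int eventId) {
    Event e = allEvents.get(eventId);
    e.status = Status.OBSOLETE;
    int mapIdx = allIdxToMapIdx.get(eventId);
    if (mapIdx >= 0) {
      mapEventView.set(mapIdx, render(e));
    }
  }

  // Bounded window over the map-only view, like getMapAttemptCompletionEvents.
  public List<String> getMapEventView(int startIndex, int maxEvents) {
    if (mapEventView.size() <= startIndex) {
      return new ArrayList<>();
    }
    int actualMax = Math.min(maxEvents, mapEventView.size() - startIndex);
    return new ArrayList<>(mapEventView.subList(startIndex, startIndex + actualMax));
  }

  private static String render(Event e) {
    return e.attemptId + ":" + e.status;
  }

  public static void main(String[] args) {
    CompletionEventBook book = new CompletionEventBook();
    int firstMap = book.add(new Event("attempt_m_000000_0", true));
    book.add(new Event("attempt_r_000000_0", false));
    book.add(new Event("attempt_m_000000_1", true));
    book.markObsolete(firstMap); // a later attempt superseded the first one
    System.out.println(book.getMapEventView(0, 10));
    // [attempt_m_000000_0:OBSOLETE, attempt_m_000000_1:SUCCEEDED]
  }
}

The index list is what lets the obsolete-marking step find the converted copy in constant time; without it, every update would have to rescan or re-convert the map-only list.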

+ 13 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo

@@ -1 +1,14 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
 org.apache.hadoop.mapreduce.v2.app.MRClientSecurityInfo

+ 7 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java

@@ -34,6 +34,7 @@ import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
@@ -153,9 +154,12 @@ public class TestTaskAttemptListenerImpl {
       .thenReturn(Arrays.copyOfRange(taskEvents, 0, 2));
     when(mockJob.getTaskAttemptCompletionEvents(2, 100))
       .thenReturn(Arrays.copyOfRange(taskEvents, 2, 4));
-    when(mockJob.getMapAttemptCompletionEvents(0, 100)).thenReturn(mapEvents);
-    when(mockJob.getMapAttemptCompletionEvents(0, 2)).thenReturn(mapEvents);
-    when(mockJob.getMapAttemptCompletionEvents(2, 100)).thenReturn(empty);
+    when(mockJob.getMapAttemptCompletionEvents(0, 100)).thenReturn(
+        TypeConverter.fromYarn(mapEvents));
+    when(mockJob.getMapAttemptCompletionEvents(0, 2)).thenReturn(
+        TypeConverter.fromYarn(mapEvents));
+    when(mockJob.getMapAttemptCompletionEvents(2, 100)).thenReturn(
+        TypeConverter.fromYarn(empty));
 
     AppContext appCtx = mock(AppContext.class);
     when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);

+ 2 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.JobACLsManager;
 import org.apache.hadoop.mapred.ShuffleHandler;
+import org.apache.hadoop.mapred.TaskCompletionEvent;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.FileSystemCounter;
 import org.apache.hadoop.mapreduce.JobACL;
@@ -556,7 +557,7 @@ public class MockJobs extends MockApps {
       }
 
       @Override
-      public TaskAttemptCompletionEvent[] getMapAttemptCompletionEvents(
+      public TaskCompletionEvent[] getMapAttemptCompletionEvents(
           int startIndex, int maxEvents) {
         return null;
       }

+ 12 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java

@@ -25,8 +25,10 @@ import java.util.Arrays;
 import java.util.Iterator;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.TaskCompletionEvent;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -150,14 +152,16 @@ public class TestFetchFailure {
     Assert.assertEquals("Event status not correct for reduce attempt1",
     Assert.assertEquals("Event status not correct for reduce attempt1",
         TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
         TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
 
 
-    TaskAttemptCompletionEvent mapEvents[] =
+    TaskCompletionEvent mapEvents[] =
         job.getMapAttemptCompletionEvents(0, 2);
+    TaskCompletionEvent convertedEvents[] = TypeConverter.fromYarn(events);
     Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
     Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
     Assert.assertArrayEquals("Unexpected map events",
     Assert.assertArrayEquals("Unexpected map events",
-        Arrays.copyOfRange(events, 0, 2), mapEvents);
+        Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
     mapEvents = job.getMapAttemptCompletionEvents(2, 200);
     Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
-    Assert.assertEquals("Unexpected map event", events[2], mapEvents[0]);
+    Assert.assertEquals("Unexpected map event", convertedEvents[2],
+        mapEvents[0]);
   }
   
   /**
@@ -395,14 +399,16 @@ public class TestFetchFailure {
     Assert.assertEquals("Event status not correct for reduce attempt1",
     Assert.assertEquals("Event status not correct for reduce attempt1",
         TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
         TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
 
 
-    TaskAttemptCompletionEvent mapEvents[] =
+    TaskCompletionEvent mapEvents[] =
         job.getMapAttemptCompletionEvents(0, 2);
+    TaskCompletionEvent convertedEvents[] = TypeConverter.fromYarn(events);
     Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
     Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
     Assert.assertArrayEquals("Unexpected map events",
     Assert.assertArrayEquals("Unexpected map events",
-        Arrays.copyOfRange(events, 0, 2), mapEvents);
+        Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
     mapEvents = job.getMapAttemptCompletionEvents(2, 200);
     Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
-    Assert.assertEquals("Unexpected map event", events[2], mapEvents[0]);
+    Assert.assertEquals("Unexpected map event", convertedEvents[2],
+        mapEvents[0]);
   }
   
 

+ 2 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java

@@ -32,6 +32,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.TaskCompletionEvent;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
@@ -441,7 +442,7 @@ public class TestRuntimeEstimators {
     }
 
     @Override
-    public TaskAttemptCompletionEvent[]
+    public TaskCompletionEvent[]
             getMapAttemptCompletionEvents(int startIndex, int maxEvents) {
       throw new UnsupportedOperationException("Not supported yet.");
     }

+ 17 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java

@@ -1,3 +1,20 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 package org.apache.hadoop.mapreduce.v2.app.launcher;
 
 import static org.mockito.Matchers.any;

+ 17 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java

@@ -1,3 +1,20 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 package org.apache.hadoop.mapreduce.v2.app.local;
 
 import static org.mockito.Matchers.isA;

+ 4 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java

@@ -82,10 +82,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskReques
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl;
-import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
-import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto;
@@ -95,7 +93,9 @@ import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsReques
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto;
-import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.RenewDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
 
@@ -109,8 +109,7 @@ public class MRClientProtocolPBClientImpl implements MRClientProtocol {
   
   public MRClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
     RPC.setProtocolEngine(conf, MRClientProtocolPB.class, ProtobufRpcEngine.class);
-    proxy = (MRClientProtocolPB)RPC.getProxy(
-        MRClientProtocolPB.class, clientVersion, addr, conf);
+    proxy = RPC.getProxy(MRClientProtocolPB.class, clientVersion, addr, conf);
   }
   
   @Override

+ 6 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java

@@ -73,14 +73,10 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskReques
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl;
-import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenRequestProto;
-import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenResponseProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto;
-import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto;
-import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenResponseProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
@@ -99,8 +95,12 @@ import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptReque
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto;
-import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.RenewDelegationTokenRequestProto;
-import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.RenewDelegationTokenResponseProto;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
+import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 
 import com.google.protobuf.RpcController;

+ 5 - 8
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java

@@ -18,8 +18,8 @@
 package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
 
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest;
-import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenRequestProto;
-import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenRequestProtoOrBuilder;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProtoOrBuilder;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.yarn.api.records.DelegationToken;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
@@ -52,10 +52,7 @@ public class CancelDelegationTokenRequestPBImpl extends
     if (this.token != null) {
       return this.token;
     }
-    if (!p.hasDelegationToken()) {
-      return null;
-    }
-    this.token = convertFromProtoFormat(p.getDelegationToken());
+    this.token = convertFromProtoFormat(p.getToken());
     return this.token;
   }
 
@@ -63,7 +60,7 @@ public class CancelDelegationTokenRequestPBImpl extends
   public void setDelegationToken(DelegationToken token) {
     maybeInitBuilder();
     if (token == null) 
-      builder.clearDelegationToken();
+      builder.clearToken();
     this.token = token;
   }
 
@@ -78,7 +75,7 @@ public class CancelDelegationTokenRequestPBImpl extends
 
   private void mergeLocalToBuilder() {
     if (token != null) {
-      builder.setDelegationToken(convertToProtoFormat(this.token));
+      builder.setToken(convertToProtoFormat(this.token));
     }
   }
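
The record wrappers touched in this file follow one pattern: the getter lazily converts the proto field and caches it, the setter clears the proto-side field when the value is removed, and mergeLocalToBuilder copies the cached value back before the proto is built; this commit only repoints them at the SecurityProtos types and the renamed token field. A rough sketch of that caching pattern with plain, hypothetical stand-in types (no protobuf involved) is:

public class TokenRequestWrapper {

  // Stand-in for the generated proto builder; real code holds a protobuf Builder.
  static final class TokenRequestBuilder {
    private String token;

    void setToken(String token) { this.token = token; }
    void clearToken() { this.token = null; }
    String getToken() { return token; }
  }

  private final TokenRequestBuilder builder = new TokenRequestBuilder();
  private String cachedToken; // record-side cache, filled lazily

  // Lazily "convert" from the proto side and cache the result.
  public String getDelegationToken() {
    if (cachedToken != null) {
      return cachedToken;
    }
    cachedToken = builder.getToken();
    return cachedToken;
  }

  // Cache the new value; clear the proto field when the value is dropped.
  public void setDelegationToken(String token) {
    if (token == null) {
      builder.clearToken();
    }
    cachedToken = token;
  }

  // Push the cached value back into the builder before the proto is built.
  void mergeLocalToBuilder() {
    if (cachedToken != null) {
      builder.setToken(cachedToken);
    }
  }

  public static void main(String[] args) {
    TokenRequestWrapper w = new TokenRequestWrapper();
    w.setDelegationToken("token-bytes");
    w.mergeLocalToBuilder();
    System.out.println(w.getDelegationToken()); // token-bytes
  }
}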
 
 

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java

@@ -18,7 +18,7 @@
 package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
 
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse;
-import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenResponseProto;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 
 public class CancelDelegationTokenResponsePBImpl extends

Some files were not shown because too many files changed in this diff