
Merging trunk to branch-trunk-win

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-trunk-win@1437113 13f79535-47bb-0310-9956-ffa450edef68
Author: Suresh Srinivas (12 years ago)
Commit: 54dbffebaa
100 changed files with 2344 additions and 847 deletions
  1. 2 2
      dev-support/test-patch.sh
  2. 77 4
      hadoop-common-project/hadoop-common/CHANGES.txt
  3. 4 0
      hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  4. 18 1
      hadoop-common-project/hadoop-common/pom.xml
  5. 17 0
      hadoop-common-project/hadoop-common/src/config.h.cmake
  6. 4 4
      hadoop-common-project/hadoop-common/src/main/bin/hadoop
  7. 219 0
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
  8. 0 16
      hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/service_level_auth.xml
  9. 14 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  10. 5 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  11. 21 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
  12. 10 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
  13. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  14. 76 11
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
  15. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
  16. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java
  17. 24 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
  18. 10 25
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  19. 2 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  20. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
  21. 62 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
  22. 15 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  23. 20 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
  24. 27 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
  25. 5 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  26. 31 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdGenerator.java
  27. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SequentialNumber.java
  28. 57 0
      hadoop-common-project/hadoop-common/src/main/proto/Security.proto
  29. 13 0
      hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
  30. 18 2
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  31. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
  32. 158 39
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
  33. 17 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java
  34. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
  35. 100 31
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
  36. 12 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextMainOperations.java
  37. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
  38. 29 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
  39. 12 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java
  40. 161 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
  41. 15 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
  42. 20 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
  43. 13 0
      hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
  44. 17 1
      hadoop-common-project/hadoop-common/src/test/resources/kdc/killKdc.sh
  45. 0 3
      hadoop-common-project/pom.xml
  46. 0 3
      hadoop-dist/pom.xml
  47. 2 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
  48. 17 1
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java
  49. 26 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
  50. 105 4
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  51. 27 0
      hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt
  52. 34 0
      hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
  53. 11 1
      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  54. 17 0
      hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake
  55. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
  56. 5 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml
  57. 139 53
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  58. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  59. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  60. 8 17
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  61. 12 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
  62. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
  63. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
  64. 4 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
  65. 0 179
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java
  66. 4 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java
  67. 8 15
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
  68. 4 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
  69. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
  70. 20 20
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
  71. 12 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
  72. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
  73. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
  74. 107 92
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  75. 50 25
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  76. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
  77. 12 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
  78. 9 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java
  79. 14 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
  80. 12 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
  81. 64 32
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  82. 6 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java
  83. 5 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java
  84. 12 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java
  85. 11 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java
  86. 0 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolProtocolBuffers/overview.html
  87. 9 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
  88. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
  89. 20 18
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  90. 17 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
  91. 73 33
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  92. 10 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
  93. 7 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
  94. 3 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
  95. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
  96. 18 18
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
  97. 5 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
  98. 54 32
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
  99. 20 26
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  100. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

+ 2 - 2
dev-support/test-patch.sh

@@ -980,12 +980,12 @@ fi
 (( RESULT = RESULT + $JAVAC_RET ))
 checkJavadocWarnings
 (( RESULT = RESULT + $? ))
-checkEclipseGeneration
-(( RESULT = RESULT + $? ))
 ### Checkstyle not implemented yet
 #checkStyle
 #(( RESULT = RESULT + $? ))
 buildAndInstall
+checkEclipseGeneration
+(( RESULT = RESULT + $? ))
 checkFindbugsWarnings
 (( RESULT = RESULT + $? ))
 checkReleaseAuditWarnings

+ 77 - 4
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -146,8 +146,13 @@ Trunk (Unreleased)
     HADOOP-9162. Add utility to check native library availability.
     (Binglin Chang via suresh)
 
+    HADOOP-8924. Add maven plugin alternative to shell script to save
+    package-info.java. (Chris Nauroth via suresh)
+
   BUG FIXES
 
+    HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)
+
     HADOOP-9041. FsUrlStreamHandlerFactory could cause an infinite loop in
     FileSystem initialization. (Yanbo Liang and Radim Kolar via llu)
 
@@ -306,9 +311,14 @@ Trunk (Unreleased)
     HADOOP-9131. Turn off TestLocalFileSystem#testListStatusWithColons on
     Windows. (Chris Nauroth via suresh)
 
-    HADOOP-8957 AbstractFileSystem#IsValidName should be overridden for
+    HADOOP-8957. AbstractFileSystem#IsValidName should be overridden for
     embedded file systems like ViewFs (Chris Nauroth via Sanjay Radia)
 
+    HADOOP-9139. improve killKdc.sh (Ivan A. Veselovsky via bobby)
+
+    HADOOP-9202. test-patch.sh fails during mvn eclipse:eclipse if patch adds
+    a new module to the build (Chris Nauroth via bobby)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -319,6 +329,8 @@ Release 2.0.3-alpha - Unreleased
 
   INCOMPATIBLE CHANGES
 
+    HADOOP-8999. SASL negotiation is flawed (daryn)
+
   NEW FEATURES
 
     HADOOP-8597. Permit FsShell's text command to read Avro files.
@@ -420,6 +432,24 @@ Release 2.0.3-alpha - Unreleased
 
     HADOOP-8427. Convert Forrest docs to APT, incremental. (adi2 via tucu)
 
+    HADOOP-9173. Add security token protobuf definition to common and
+    use it in hdfs. (suresh)
+
+    HADOOP-9119. Add test to FileSystemContractBaseTest to verify integrity
+    of overwritten files. (Steve Loughran via suresh)
+
+    HADOOP-9192. Move token related request/response messages to common.
+    (suresh)
+
+    HADOOP-8712. Change default hadoop.security.group.mapping to
+    JniBasedUnixGroupsNetgroupMappingWithFallback (Robert Parker via todd)
+
+    HADOOP-9106. Allow configuration of IPC connect timeout.
+    (Rober Parker via suresh)
+
+    HADOOP-9216. CompressionCodecFactory#getCodecClasses should trim the
+    result of parsing by Configuration. (Tsuyoshi Ozawa via todd)
+
   OPTIMIZATIONS
 
     HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
@@ -480,8 +510,6 @@ Release 2.0.3-alpha - Unreleased
 
     HADOOP-7115. Add a cache for getpwuid_r and getpwgid_r calls (tucu)
 
-    HADOOP-8999. SASL negotiation is flawed (daryn)
-
     HADOOP-6607. Add different variants of non caching HTTP headers. (tucu)
 
     HADOOP-9049. DelegationTokenRenewer needs to be Singleton and FileSystems
@@ -516,6 +544,31 @@ Release 2.0.3-alpha - Unreleased
     HADOOP-9153. Support createNonRecursive in ViewFileSystem.
     (Sandy Ryza via tomwhite)
 
+    HADOOP-9181. Set daemon flag for HttpServer's QueuedThreadPool.
+    (Liang Xie via suresh)
+
+    HADOOP-9155. FsPermission should have different default value, 777 for
+    directory and 666 for file. (Binglin Chang via atm)
+
+    HADOOP-9183. Potential deadlock in ActiveStandbyElector. (tomwhite)
+
+    HADOOP-9203. RPCCallBenchmark should find a random available port.
+    (Andrew Purtell via suresh)
+
+    HADOOP-9178. src/main/conf is missing hadoop-policy.xml.
+    (Sandy Ryza via eli)
+
+    HADOOP-8816. HTTP Error 413 full HEAD if using kerberos authentication.
+    (moritzmoeller via tucu)
+
+    HADOOP-9212. Potential deadlock in FileSystem.Cache/IPC/UGI. (tomwhite)
+
+    HADOOP-9193. hadoop script can inadvertently expand wildcard arguments
+    when delegating to hdfs script. (Andy Isaacson via todd)
+
+    HADOOP-9215. when using cmake-2.6, libhadoop.so doesn't get created
+    (only libhadoop.so.1.0.0) (Colin Patrick McCabe via todd)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES
@@ -1206,6 +1259,21 @@ Release 2.0.0-alpha - 05-23-2012
     HADOOP-8655. Fix TextInputFormat for large deliminators. (Gelesh via
     bobby)
 
+Release 0.23.7 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HADOOP-8849. FileUtil#fullyDelete should grant the target directories +rwx
+    permissions (Ivan A. Veselovsky via bobby)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 0.23.6 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -1213,6 +1281,8 @@ Release 0.23.6 - UNRELEASED
   NEW FEATURES
 
   IMPROVEMENTS
+    HADOOP-9217. Print thread dumps when hadoop-common tests fail.
+    (Andrey Klochkov via suresh)
 
   OPTIMIZATIONS
 
@@ -1229,7 +1299,10 @@ Release 0.23.6 - UNRELEASED
 
     HADOOP-9105. FsShell -moveFromLocal erroneously fails (daryn via bobby)
 
-Release 0.23.5 - UNRELEASED
+    HADOOP-9097. Maven RAT plugin is not checking all source files (tgraves)
+
+Release 0.23.5 - 2012-11-28
+
 
   INCOMPATIBLE CHANGES
 

+ 4 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -282,6 +282,10 @@
       <!-- protobuf generated code -->
       <Class name="~org\.apache\.hadoop\.ha\.proto\.ZKFCProtocolProtos.*"/>
     </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.security\.proto\.SecurityProtos.*"/>
+    </Match>
 
     <!--
        Manually checked, misses child thread manually syncing on parent's intrinsic lock.

+ 18 - 1
hadoop-common-project/hadoop-common/pom.xml

@@ -431,6 +431,7 @@
                 <argument>src/main/proto/RpcHeader.proto</argument>
                 <argument>src/main/proto/ZKFCProtocol.proto</argument>
                 <argument>src/main/proto/ProtobufRpcEngine.proto</argument>
+                <argument>src/main/proto/Security.proto</argument>
               </arguments>
             </configuration>
           </execution>
@@ -494,13 +495,26 @@
             <exclude>dev-support/jdiff/**</exclude>
             <exclude>src/main/native/*</exclude>
             <exclude>src/main/native/config/*</exclude>
-            <exclude>src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo</exclude>
             <exclude>src/main/native/m4/*</exclude>
             <exclude>src/test/empty-file</exclude>
             <exclude>src/test/all-tests</exclude>
+            <exclude>src/test/resources/kdc/ldif/users.ldif</exclude>
+            <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c</exclude>
           </excludes>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 
@@ -584,6 +598,9 @@
                     <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
                       <arg line="VERBOSE=1"/>
                     </exec>
+                    <!-- The second make is a workaround for HADOOP-9215.  It can
+                         be removed when version 2.6 of cmake is no longer supported . -->
+                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true"></exec>
                   </target>
                 </configuration>
               </execution>

+ 17 - 0
hadoop-common-project/hadoop-common/src/config.h.cmake

@@ -1,3 +1,20 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 #ifndef CONFIG_H
 #define CONFIG_H
 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -58,9 +58,9 @@ case $COMMAND in
     #try to locate hdfs and if present, delegate to it.
     shift
     if [ -f "${HADOOP_HDFS_HOME}"/bin/hdfs ]; then
-      exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups}  $*
+      exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups}  "$@"
     elif [ -f "${HADOOP_PREFIX}"/bin/hdfs ]; then
-      exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} $*
+      exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@"
     else
       echo "HADOOP_HDFS_HOME not found!"
       exit 1
@@ -75,9 +75,9 @@ case $COMMAND in
     #try to locate mapred and if present, delegate to it.
     shift
     if [ -f "${HADOOP_MAPRED_HOME}"/bin/mapred ]; then
-      exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} $*
+      exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
     elif [ -f "${HADOOP_PREFIX}"/bin/mapred ]; then
-      exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} $*
+      exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
     else
       echo "HADOOP_MAPRED_HOME not found!"
       exit 1

+ 219 - 0
hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml

@@ -0,0 +1,219 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+
+ Copyright 2011 The Apache Software Foundation
+ 
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+ <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value>${HADOOP_HDFS_USER}</value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <value>${HADOOP_HDFS_USER}</value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value>${HADOOP_HDFS_USER}</value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.ha.service.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HAService protocol used by HAAdmin to manage the
+      active and stand-by states of namenode.</description>
+  </property>
+
+  <property>
+    <name>security.zkfc.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for access to the ZK Failover Controller
+    </description>
+  </property>
+
+  <property>
+    <name>security.qjournal.service.protocol.acl</name>
+    <value>${HADOOP_HDFS_USER}</value>
+    <description>ACL for QJournalProtocol, used by the NN to communicate with
+    JNs when using the QuorumJournalManager for edit logs.</description>
+  </property>
+
+  <property>
+    <name>security.mrhs.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HSClientProtocol, used by job clients to
+    communciate with the MR History Server job status etc. 
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <!-- YARN Protocols -->
+
+  <property>
+    <name>security.resourcetracker.protocol.acl</name>
+    <value>${HADOOP_YARN_USER}</value>
+    <description>ACL for ResourceTracker protocol, used by the
+    ResourceManager and NodeManager to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.admin.protocol.acl</name>
+    <value>${HADOOP_YARN_USER}</value>
+    <description>ACL for RMAdminProtocol, for admin commands. 
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.resourcemanager.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientRMProtocol, used by the ResourceManager 
+    and applications submission clients to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationmaster.resourcemanager.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for AMRMProtocol, used by the ResourceManager 
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.containermanager.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ContainerManager protocol, used by the NodeManager 
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.resourcelocalizer.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceLocalizer protocol, used by the NodeManager 
+    and ResourceLocalizer to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.task.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for MRClientProtocol, used by job clients to
+    communciate with the MR ApplicationMaster to query job status etc. 
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+</configuration>

+ 0 - 16
hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/service_level_auth.xml

@@ -116,22 +116,6 @@
             <td>ACL for NamenodeProtocol, the protocol used by the secondary
             namenode to communicate with the namenode.</td>
           </tr>
-          <tr>
-            <td><code>security.inter.tracker.protocol.acl</code></td>
-            <td>ACL for InterTrackerProtocol, used by the tasktrackers to 
-            communicate with the jobtracker.</td>
-          </tr>
-          <tr>
-            <td><code>security.job.submission.protocol.acl</code></td>
-            <td>ACL for JobSubmissionProtocol, used by job clients to 
-            communciate with the jobtracker for job submission, querying job status 
-            etc.</td>
-          </tr>
-          <tr>
-            <td><code>security.task.umbilical.protocol.acl</code></td>
-            <td>ACL for TaskUmbilicalProtocol, used by the map and reduce 
-            tasks to communicate with the parent tasktracker.</td>
-          </tr>
           <tr>
             <td><code>security.refresh.policy.protocol.acl</code></td>
             <td>ACL for RefreshAuthorizationPolicyProtocol, used by the 

+ 14 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.http.lib.StaticUserWebFilter;
+import org.apache.hadoop.security.authorize.Service;
 
 /** 
  * This class contains constants for configuration keys used
@@ -114,7 +115,18 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
   public static final String 
   SECURITY_ZKFC_PROTOCOL_ACL = "security.zkfc.protocol.acl";
-  
+  public static final String
+  SECURITY_CLIENT_PROTOCOL_ACL = "security.client.protocol.acl";
+  public static final String SECURITY_CLIENT_DATANODE_PROTOCOL_ACL =
+      "security.client.datanode.protocol.acl";
+  public static final String
+  SECURITY_DATANODE_PROTOCOL_ACL = "security.datanode.protocol.acl";
+  public static final String
+  SECURITY_INTER_DATANODE_PROTOCOL_ACL = "security.inter.datanode.protocol.acl";
+  public static final String
+  SECURITY_NAMENODE_PROTOCOL_ACL = "security.namenode.protocol.acl";
+  public static final String SECURITY_QJOURNAL_SERVICE_PROTOCOL_ACL =
+      "security.qjournal.service.protocol.acl";
   public static final String HADOOP_SECURITY_TOKEN_SERVICE_USE_IP =
       "hadoop.security.token.service.use_ip";
   public static final boolean HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT =
@@ -191,4 +203,4 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final long HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT =
     4*60*60; // 4 hours
 
-}
+}

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -173,6 +173,11 @@ public class CommonConfigurationKeysPublic {
   /** Default value for IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY */
   public static final int     IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT = 10000; // 10s
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String  IPC_CLIENT_CONNECT_TIMEOUT_KEY =
+    "ipc.client.connect.timeout";
+  /** Default value for IPC_CLIENT_CONNECT_TIMEOUT_KEY */
+  public static final int     IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT = 20000; // 20s
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  IPC_CLIENT_CONNECT_MAX_RETRIES_KEY =
     "ipc.client.connect.max.retries";
   /** Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_KEY */
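
A minimal sketch of overriding the new ipc.client.connect.timeout key from HADOOP-9106; the class name and the printout are illustrative, while the key constant and its 20000 ms default come from the hunk above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

public class IpcConnectTimeoutExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Default is IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT (20000 ms); lower it to 5 s here.
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_TIMEOUT_KEY, 5000);
    System.out.println("connect timeout = "
        + conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_TIMEOUT_KEY,
            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT) + " ms");
  }
}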

+ 21 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -172,7 +172,25 @@ import org.apache.hadoop.util.ShutdownHookManager;
 public final class FileContext {
   
   public static final Log LOG = LogFactory.getLog(FileContext.class);
+  /**
+   * Default permission for directory and symlink
+   * In previous versions, this default permission was also used to
+   * create files, so files created end up with ugo+x permission.
+   * See HADOOP-9155 for detail. 
+   * Two new constants are added to solve this, please use 
+   * {@link FileContext#DIR_DEFAULT_PERM} for directory, and use
+   * {@link FileContext#FILE_DEFAULT_PERM} for file.
+   * This constant is kept for compatibility.
+   */
   public static final FsPermission DEFAULT_PERM = FsPermission.getDefault();
+  /**
+   * Default permission for directory
+   */
+  public static final FsPermission DIR_DEFAULT_PERM = FsPermission.getDirDefault();
+  /**
+   * Default permission for file
+   */
+  public static final FsPermission FILE_DEFAULT_PERM = FsPermission.getFileDefault();
 
   /**
    * Priority of the FileContext shutdown hook.
@@ -656,7 +674,7 @@ public final class FileContext {
     CreateOpts.Perms permOpt = 
       (CreateOpts.Perms) CreateOpts.getOpt(CreateOpts.Perms.class, opts);
     FsPermission permission = (permOpt != null) ? permOpt.getValue() :
-                                      FsPermission.getDefault();
+                                      FILE_DEFAULT_PERM;
     permission = permission.applyUMask(umask);
 
     final CreateOpts[] updatedOpts = 
@@ -704,7 +722,7 @@ public final class FileContext {
       IOException {
     final Path absDir = fixRelativePart(dir);
     final FsPermission absFerms = (permission == null ? 
-          FsPermission.getDefault() : permission).applyUMask(umask);
+          FsPermission.getDirDefault() : permission).applyUMask(umask);
     new FSLinkResolver<Void>() {
       @Override
       public Void next(final AbstractFileSystem fs, final Path p) 
@@ -2157,7 +2175,7 @@ public final class FileContext {
       FileStatus fs = FileContext.this.getFileStatus(qSrc);
       if (fs.isDirectory()) {
         checkDependencies(qSrc, qDst);
-        mkdir(qDst, FsPermission.getDefault(), true);
+        mkdir(qDst, FsPermission.getDirDefault(), true);
         FileStatus[] contents = listStatus(qSrc);
         for (FileStatus content : contents) {
           copy(makeQualified(content.getPath()), makeQualified(new Path(qDst,
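
A minimal sketch of creating a directory with the new FileContext.DIR_DEFAULT_PERM constant; the class name and path are illustrative, while the constant and the mkdir(Path, FsPermission, boolean) call come from the patched FileContext above.

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class MkdirWithDirDefault {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext();
    // 0777 (before umask) for the directory, matching what the patched code
    // now applies when no explicit permission is supplied.
    fc.mkdir(new Path("/tmp/example-dir"), FileContext.DIR_DEFAULT_PERM, true);
  }
}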

+ 10 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java

@@ -79,8 +79,15 @@ public class FileStatus implements Writable, Comparable {
     this.blocksize = blocksize;
     this.modification_time = modification_time;
     this.access_time = access_time;
-    this.permission = (permission == null) ? 
-                      FsPermission.getDefault() : permission;
+    if (permission != null) {
+      this.permission = permission;
+    } else if (isdir) {
+      this.permission = FsPermission.getDirDefault();
+    } else if (symlink!=null) {
+      this.permission = FsPermission.getDefault();
+    } else {
+      this.permission = FsPermission.getFileDefault();
+    }
     this.owner = (owner == null) ? "" : owner;
     this.group = (group == null) ? "" : group;
     this.symlink = symlink;
@@ -217,7 +224,7 @@ public class FileStatus implements Writable, Comparable {
    */
   protected void setPermission(FsPermission permission) {
     this.permission = (permission == null) ? 
-                      FsPermission.getDefault() : permission;
+                      FsPermission.getFileDefault() : permission;
   }
   
   /**
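
A minimal sketch of how the revised constructor resolves a null permission per entry type; it assumes the ten-argument FileStatus constructor from hadoop-common, and the paths are illustrative.

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

public class NullPermissionDefaults {
  public static void main(String[] args) {
    // Directory with a null permission -> FsPermission.getDirDefault() (0777).
    FileStatus dir = new FileStatus(0L, true, 0, 0L, 0L, 0L, null, null, null,
        new Path("/tmp/d"));
    // Regular file with a null permission -> FsPermission.getFileDefault() (0666).
    FileStatus file = new FileStatus(1L, false, 1, 64L, 0L, 0L, null, null, null,
        new Path("/tmp/f"));
    System.out.println(dir.getPermission() + " / " + file.getPermission());
  }
}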

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -850,7 +850,7 @@ public abstract class FileSystem extends Configured implements Closeable {
                                             long blockSize,
                                             Progressable progress
                                             ) throws IOException {
-    return this.create(f, FsPermission.getDefault().applyUMask(
+    return this.create(f, FsPermission.getFileDefault().applyUMask(
         FsPermission.getUMask(getConf())), overwrite, bufferSize,
         replication, blockSize, progress);
   }
@@ -1030,7 +1030,7 @@ public abstract class FileSystem extends Configured implements Closeable {
       boolean overwrite,
       int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
-    return this.createNonRecursive(f, FsPermission.getDefault(),
+    return this.createNonRecursive(f, FsPermission.getFileDefault(),
         overwrite, bufferSize, replication, blockSize, progress);
   }
 
@@ -1866,7 +1866,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Call {@link #mkdirs(Path, FsPermission)} with default permission.
    */
   public boolean mkdirs(Path f) throws IOException {
-    return mkdirs(f, FsPermission.getDefault());
+    return mkdirs(f, FsPermission.getDirDefault());
   }
 
   /**

+ 76 - 11
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java

@@ -101,33 +101,98 @@ public class FileUtil {
    * (4) If dir is a normal directory, then dir and all its contents recursively
    *     are deleted.
    */
-  public static boolean fullyDelete(File dir) {
-    if (dir.delete()) {
+  public static boolean fullyDelete(final File dir) {
+    return fullyDelete(dir, false);
+  }
+  
+  /**
+   * Delete a directory and all its contents.  If
+   * we return false, the directory may be partially-deleted.
+   * (1) If dir is symlink to a file, the symlink is deleted. The file pointed
+   *     to by the symlink is not deleted.
+   * (2) If dir is symlink to a directory, symlink is deleted. The directory
+   *     pointed to by symlink is not deleted.
+   * (3) If dir is a normal file, it is deleted.
+   * (4) If dir is a normal directory, then dir and all its contents recursively
+   *     are deleted.
+   * @param dir the file or directory to be deleted
+   * @param tryGrantPermissions true if permissions should be modified to delete a file.
+   * @return true on success false on failure.
+   */
+  public static boolean fullyDelete(final File dir, boolean tryGrantPermissions) {
+    if (tryGrantPermissions) {
+      // try to chmod +rwx the parent folder of the 'dir': 
+      File parent = dir.getParentFile();
+      grantPermissions(parent);
+    }
+    if (deleteImpl(dir, false)) {
       // dir is (a) normal file, (b) symlink to a file, (c) empty directory or
       // (d) symlink to a directory
       return true;
     }
-
     // handle nonempty directory deletion
-    if (!fullyDeleteContents(dir)) {
+    if (!fullyDeleteContents(dir, tryGrantPermissions)) {
       return false;
     }
-    return dir.delete();
+    return deleteImpl(dir, true);
+  }
+  
+  /*
+   * Pure-Java implementation of "chmod +rwx f".
+   */
+  private static void grantPermissions(final File f) {
+      f.setExecutable(true);
+      f.setReadable(true);
+      f.setWritable(true);
   }
 
+  private static boolean deleteImpl(final File f, final boolean doLog) {
+    if (f == null) {
+      LOG.warn("null file argument.");
+      return false;
+    }
+    final boolean wasDeleted = f.delete();
+    if (wasDeleted) {
+      return true;
+    }
+    final boolean ex = f.exists();
+    if (doLog && ex) {
+      LOG.warn("Failed to delete file or dir ["
+          + f.getAbsolutePath() + "]: it still exists.");
+    }
+    return !ex;
+  }
+  
   /**
    * Delete the contents of a directory, not the directory itself.  If
    * we return false, the directory may be partially-deleted.
    * If dir is a symlink to a directory, all the contents of the actual
    * directory pointed to by dir will be deleted.
    */
-  public static boolean fullyDeleteContents(File dir) {
+  public static boolean fullyDeleteContents(final File dir) {
+    return fullyDeleteContents(dir, false);
+  }
+  
+  /**
+   * Delete the contents of a directory, not the directory itself.  If
+   * we return false, the directory may be partially-deleted.
+   * If dir is a symlink to a directory, all the contents of the actual
+   * directory pointed to by dir will be deleted.
+   * @param tryGrantPermissions if 'true', try grant +rwx permissions to this 
+   * and all the underlying directories before trying to delete their contents.
+   */
+  public static boolean fullyDeleteContents(final File dir, final boolean tryGrantPermissions) {
+    if (tryGrantPermissions) {
+      // to be able to list the dir and delete files from it
+      // we must grant the dir rwx permissions: 
+      grantPermissions(dir);
+    }
     boolean deletionSucceeded = true;
-    File contents[] = dir.listFiles();
+    final File[] contents = dir.listFiles();
     if (contents != null) {
       for (int i = 0; i < contents.length; i++) {
         if (contents[i].isFile()) {
-          if (!contents[i].delete()) {// normal file or symlink to another file
+          if (!deleteImpl(contents[i], true)) {// normal file or symlink to another file
             deletionSucceeded = false;
             continue; // continue deletion of other files/dirs under dir
           }
@@ -135,16 +200,16 @@ public class FileUtil {
           // Either directory or symlink to another directory.
           // Try deleting the directory as this might be a symlink
           boolean b = false;
-          b = contents[i].delete();
+          b = deleteImpl(contents[i], false);
           if (b){
             //this was indeed a symlink or an empty directory
             continue;
           }
           // if not an empty directory or symlink let
           // fullydelete handle it.
-          if (!fullyDelete(contents[i])) {
+          if (!fullyDelete(contents[i], tryGrantPermissions)) {
             deletionSucceeded = false;
-            continue; // continue deletion of other files/dirs under dir
+            // continue deletion of other files/dirs under dir
           }
         }
       }
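
A minimal sketch of the new fullyDelete overload from HADOOP-8849; the class name and path are illustrative, while the FileUtil.fullyDelete(File, boolean) signature comes from the hunk above.

import java.io.File;
import org.apache.hadoop.fs.FileUtil;

public class FullyDeleteExample {
  public static void main(String[] args) {
    File dir = new File("/tmp/stale-test-dir");  // illustrative path
    // With tryGrantPermissions=true, the parent and each subdirectory are
    // chmod'ed +rwx first, so read-only trees left behind by tests can
    // still be removed.
    boolean deleted = FileUtil.fullyDelete(dir, true);
    System.out.println("deleted: " + deleted);
  }
}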

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java

@@ -224,7 +224,7 @@ public class FTPFileSystem extends FileSystem {
     }
     
     Path parent = absolute.getParent();
-    if (parent == null || !mkdirs(client, parent, FsPermission.getDefault())) {
+    if (parent == null || !mkdirs(client, parent, FsPermission.getDirDefault())) {
       parent = (parent == null) ? new Path("/") : parent;
       disconnect(client);
       throw new IOException("create(): Mkdirs failed to create: " + parent);
@@ -484,7 +484,7 @@ public class FTPFileSystem extends FileSystem {
     if (!exists(client, absolute)) {
       Path parent = absolute.getParent();
       created = (parent == null || mkdirs(client, parent, FsPermission
-          .getDefault()));
+          .getDirDefault()));
       if (created) {
         String parentDir = parent.toUri().getPath();
         client.changeWorkingDirectory(parentDir);

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java

@@ -85,7 +85,7 @@ public class RawLocalFs extends DelegateToFileSystem {
                             "system: "+target.toString());
     }
     if (createParent) {
-      mkdir(link.getParent(), FsPermission.getDefault(), true);
+      mkdir(link.getParent(), FsPermission.getDirDefault(), true);
     }
     // NB: Use createSymbolicLink in java.nio.file.Path once available
     try {

+ 24 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java

@@ -275,11 +275,34 @@ public class FsPermission implements Writable {
     conf.setInt(DEPRECATED_UMASK_LABEL, umask.toShort());
     conf.setInt(DEPRECATED_UMASK_LABEL, umask.toShort());
   }
   }
 
 
-  /** Get the default permission. */
+  /**
+   * Get the default permission for directory and symlink.
+   * In previous versions, this default permission was also used to
+   * create files, so files created end up with ugo+x permission.
+   * See HADOOP-9155 for detail. 
+   * Two new methods are added to solve this, please use 
+   * {@link FsPermission#getDirDefault()} for directory, and use
+   * {@link FsPermission#getFileDefault()} for file.
+   * This method is kept for compatibility.
+   */
   public static FsPermission getDefault() {
   public static FsPermission getDefault() {
     return new FsPermission((short)00777);
     return new FsPermission((short)00777);
   }
   }
 
 
+  /**
+   * Get the default permission for directory.
+   */
+  public static FsPermission getDirDefault() {
+    return new FsPermission((short)00777);
+  }
+
+  /**
+   * Get the default permission for files.
+   */
+  public static FsPermission getFileDefault() {
+    return new FsPermission((short)00666);
+  }
+
   /**
   /**
    * Create a FsPermission from a Unix symbolic permission string
    * Create a FsPermission from a Unix symbolic permission string
    * @param unixSymbolicPermission e.g. "-rw-rw-rw-"
    * @param unixSymbolicPermission e.g. "-rw-rw-rw-"
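
The split above means directories keep the historical 0777 starting point while files now start from 0666, so newly created files no longer pick up execute bits. A minimal sketch of how the two new defaults combine with a umask, using only the methods shown in this hunk (the 022 umask value is an illustrative assumption):

    import org.apache.hadoop.fs.permission.FsPermission;

    public class DefaultPermissionDemo {
      public static void main(String[] args) {
        FsPermission umask = new FsPermission((short) 022);   // typical process umask

        // Directories start from 0777, files from 0666, before the umask is applied.
        FsPermission dirPerm  = FsPermission.getDirDefault().applyUMask(umask);   // 0755
        FsPermission filePerm = FsPermission.getFileDefault().applyUMask(umask);  // 0644

        System.out.println("dir:  " + dirPerm);   // rwxr-xr-x
        System.out.println("file: " + filePerm);  // rw-r--r--
      }
    }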

+ 10 - 25
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -613,7 +613,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     // Unfortunately, the ZooKeeper constructor connects to ZooKeeper and
     // Unfortunately, the ZooKeeper constructor connects to ZooKeeper and
     // may trigger the Connected event immediately. So, if we register the
     // may trigger the Connected event immediately. So, if we register the
     // watcher after constructing ZooKeeper, we may miss that event. Instead,
     // watcher after constructing ZooKeeper, we may miss that event. Instead,
-    // we construct the watcher first, and have it queue any events it receives
+    // we construct the watcher first, and have it block any events it receives
     // before we can set its ZooKeeper reference.
     // before we can set its ZooKeeper reference.
     WatcherWithClientRef watcher = new WatcherWithClientRef();
     WatcherWithClientRef watcher = new WatcherWithClientRef();
     ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, watcher);
     ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, watcher);
@@ -1002,19 +1002,17 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     private CountDownLatch hasReceivedEvent = new CountDownLatch(1);
     private CountDownLatch hasReceivedEvent = new CountDownLatch(1);
 
 
     /**
     /**
-     * If any events arrive before the reference to ZooKeeper is set,
-     * they get queued up and later forwarded when the reference is
-     * available.
+     * Latch used to wait until the reference to ZooKeeper is set.
      */
      */
-    private final List<WatchedEvent> queuedEvents = Lists.newLinkedList();
+    private CountDownLatch hasSetZooKeeper = new CountDownLatch(1);
     
     
     private WatcherWithClientRef() {
     private WatcherWithClientRef() {
     }
     }
 
 
     private WatcherWithClientRef(ZooKeeper zk) {
     private WatcherWithClientRef(ZooKeeper zk) {
-      this.zk = zk;
+      setZooKeeperRef(zk);
     }
     }
-    
+
     /**
     /**
      * Waits for the next event from ZooKeeper to arrive.
      * Waits for the next event from ZooKeeper to arrive.
      * 
      * 
@@ -1029,9 +1027,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         if (!hasReceivedEvent.await(connectionTimeoutMs, TimeUnit.MILLISECONDS)) {
         if (!hasReceivedEvent.await(connectionTimeoutMs, TimeUnit.MILLISECONDS)) {
           LOG.error("Connection timed out: couldn't connect to ZooKeeper in "
           LOG.error("Connection timed out: couldn't connect to ZooKeeper in "
               + connectionTimeoutMs + " milliseconds");
               + connectionTimeoutMs + " milliseconds");
-          synchronized (this) {
-            zk.close();
-          }
+          zk.close();
           throw KeeperException.create(Code.CONNECTIONLOSS);
           throw KeeperException.create(Code.CONNECTIONLOSS);
         }
         }
       } catch (InterruptedException e) {
       } catch (InterruptedException e) {
@@ -1041,29 +1037,18 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       }
       }
     }
     }
 
 
-    private synchronized void setZooKeeperRef(ZooKeeper zk) {
+    private void setZooKeeperRef(ZooKeeper zk) {
       Preconditions.checkState(this.zk == null,
       Preconditions.checkState(this.zk == null,
           "zk already set -- must be set exactly once");
           "zk already set -- must be set exactly once");
       this.zk = zk;
       this.zk = zk;
-      
-      for (WatchedEvent e : queuedEvents) {
-        forwardEvent(e);
-      }
-      queuedEvents.clear();
+      hasSetZooKeeper.countDown();
     }
     }
 
 
     @Override
     @Override
-    public synchronized void process(WatchedEvent event) {
-      if (zk != null) {
-        forwardEvent(event);
-      } else {
-        queuedEvents.add(event);
-      }
-    }
-    
-    private void forwardEvent(WatchedEvent event) {
+    public void process(WatchedEvent event) {
       hasReceivedEvent.countDown();
       hasReceivedEvent.countDown();
       try {
       try {
+        hasSetZooKeeper.await(zkSessionTimeout, TimeUnit.MILLISECONDS);
         ActiveStandbyElector.this.processWatchEvent(
         ActiveStandbyElector.this.processWatchEvent(
             zk, event);
             zk, event);
       } catch (Throwable t) {
       } catch (Throwable t) {
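
The change replaces the event queue with a latch: the watcher callback now blocks until the ZooKeeper reference has been published and then forwards the event directly. A self-contained sketch of that pattern, using plain placeholder types instead of the real ZooKeeper classes (the 5-second bound is an arbitrary assumption):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    public class LatchGatedCallback {
      private volatile Object client;                      // stands in for the ZooKeeper handle
      private final CountDownLatch clientSet = new CountDownLatch(1);

      void setClientRef(Object c) {
        this.client = c;
        clientSet.countDown();                             // release any callback that is waiting
      }

      void onEvent(String event) throws InterruptedException {
        // Instead of queueing the event, block (with a bound) until the reference is published.
        if (clientSet.await(5, TimeUnit.SECONDS)) {
          System.out.println("handling " + event + " with " + client);
        }
      }
    }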

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -246,6 +246,7 @@ public class HttpServer implements FilterContainer {
     // default value (currently 250).
     // default value (currently 250).
     QueuedThreadPool threadPool = maxThreads == -1 ?
     QueuedThreadPool threadPool = maxThreads == -1 ?
         new QueuedThreadPool() : new QueuedThreadPool(maxThreads);
         new QueuedThreadPool() : new QueuedThreadPool(maxThreads);
+    threadPool.setDaemon(true);
     webServer.setThreadPool(threadPool);
     webServer.setThreadPool(threadPool);
 
 
     final String appDir = getWebAppsPath(name);
     final String appDir = getWebAppsPath(name);
@@ -312,6 +313,7 @@ public class HttpServer implements FilterContainer {
       // the same port with indeterminate routing of incoming requests to them
       // the same port with indeterminate routing of incoming requests to them
       ret.setReuseAddress(false);
       ret.setReuseAddress(false);
     }
     }
+    ret.setHeaderBufferSize(1024*64);
     return ret;
     return ret;
   }
   }
 
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java

@@ -122,7 +122,7 @@ public class CompressionCodecFactory {
     if (codecsString != null) {
     if (codecsString != null) {
       StringTokenizer codecSplit = new StringTokenizer(codecsString, ",");
       StringTokenizer codecSplit = new StringTokenizer(codecsString, ",");
       while (codecSplit.hasMoreElements()) {
       while (codecSplit.hasMoreElements()) {
-        String codecSubstring = codecSplit.nextToken();
+        String codecSubstring = codecSplit.nextToken().trim();
         if (codecSubstring.length() != 0) {
         if (codecSubstring.length() != 0) {
           try {
           try {
             Class<?> cls = conf.getClassByName(codecSubstring);
             Class<?> cls = conf.getClassByName(codecSubstring);
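
Trimming each token lets io.compression.codecs values contain whitespace around the commas without producing an unloadable class name. A small stand-alone illustration of the parsing, JDK classes only (the codec names are just examples):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.StringTokenizer;

    public class TrimCodecNames {
      public static void main(String[] args) {
        // A value as a user might write it, with stray whitespace around the commas.
        String codecsString = "  org.apache.hadoop.io.compress.GzipCodec , "
            + "org.apache.hadoop.io.compress.DefaultCodec  ";

        List<String> names = new ArrayList<String>();
        StringTokenizer codecSplit = new StringTokenizer(codecsString, ",");
        while (codecSplit.hasMoreElements()) {
          String codecSubstring = codecSplit.nextToken().trim();  // the fix: trim each token
          if (codecSubstring.length() != 0) {
            names.add(codecSubstring);
          }
        }
        System.out.println(names);  // clean class names with no surrounding spaces
      }
    }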

+ 62 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java

@@ -40,14 +40,74 @@ public class GzipCodec extends DefaultCodec {
   protected static class GzipOutputStream extends CompressorStream {
   protected static class GzipOutputStream extends CompressorStream {
 
 
     private static class ResetableGZIPOutputStream extends GZIPOutputStream {
     private static class ResetableGZIPOutputStream extends GZIPOutputStream {
-      
+      private static final int TRAILER_SIZE = 8;
+      public static final String JVMVendor = System.getProperty("java.vendor");
+      public static final String JVMVersion = System.getProperty("java.version");
+      private static final boolean HAS_BROKEN_FINISH =
+          (JVMVendor.contains("IBM") && JVMVersion.contains("1.6.0"));
+
       public ResetableGZIPOutputStream(OutputStream out) throws IOException {
       public ResetableGZIPOutputStream(OutputStream out) throws IOException {
         super(out);
         super(out);
       }
       }
-      
+
       public void resetState() throws IOException {
       public void resetState() throws IOException {
         def.reset();
         def.reset();
       }
       }
+
+      /**
+       * Overridden for HADOOP-8419: the IBM JDK implementation calls
+       * def.end(), which causes problems when resetting the stream
+       * for reuse.
+       */
+      @Override
+      public void finish() throws IOException {
+        if (HAS_BROKEN_FINISH) {
+          if (!def.finished()) {
+            def.finish();
+            while (!def.finished()) {
+              int i = def.deflate(this.buf, 0, this.buf.length);
+              if ((def.finished()) && (i <= this.buf.length - TRAILER_SIZE)) {
+                writeTrailer(this.buf, i);
+                i += TRAILER_SIZE;
+                out.write(this.buf, 0, i);
+
+                return;
+              }
+              if (i > 0) {
+                out.write(this.buf, 0, i);
+              }
+            }
+
+            byte[] arrayOfByte = new byte[TRAILER_SIZE];
+            writeTrailer(arrayOfByte, 0);
+            out.write(arrayOfByte);
+          }
+        } else {
+          super.finish();
+        }
+      }
+
+      /** Re-implemented for HADOOP-8419 because the corresponding JDK method is not visible (private). */
+      private void writeTrailer(byte[] paramArrayOfByte, int paramInt)
+        throws IOException {
+        writeInt((int)this.crc.getValue(), paramArrayOfByte, paramInt);
+        writeInt(this.def.getTotalIn(), paramArrayOfByte, paramInt + 4);
+      }
+
+      /** Re-implemented for HADOOP-8419 because the corresponding JDK method is not visible (private). */
+      private void writeInt(int paramInt1, byte[] paramArrayOfByte, int paramInt2)
+        throws IOException {
+        writeShort(paramInt1 & 0xFFFF, paramArrayOfByte, paramInt2);
+        writeShort(paramInt1 >> 16 & 0xFFFF, paramArrayOfByte, paramInt2 + 2);
+      }
+
+      /** Re-implemented for HADOOP-8419 because the corresponding JDK method is not visible (private). */
+      private void writeShort(int paramInt1, byte[] paramArrayOfByte, int paramInt2)
+        throws IOException {
+        paramArrayOfByte[paramInt2] = (byte)(paramInt1 & 0xFF);
+        paramArrayOfByte[(paramInt2 + 1)] = (byte)(paramInt1 >> 8 & 0xFF);
+      }
     }
     }
 
 
     public GzipOutputStream(OutputStream out) throws IOException {
     public GzipOutputStream(OutputStream out) throws IOException {
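
The override hand-writes the 8-byte gzip trailer (CRC-32 of the uncompressed data followed by its length, both little-endian) because the JDK's own helpers are private. A stand-alone sketch of that trailer layout, mirroring the writeInt/writeShort helpers above (the sample input is arbitrary):

    import java.util.zip.CRC32;

    public class GzipTrailerDemo {
      static void writeShort(int v, byte[] buf, int off) {
        buf[off]     = (byte) (v & 0xFF);
        buf[off + 1] = (byte) ((v >> 8) & 0xFF);
      }

      static void writeInt(int v, byte[] buf, int off) {
        writeShort(v & 0xFFFF, buf, off);
        writeShort((v >> 16) & 0xFFFF, buf, off + 2);
      }

      public static void main(String[] args) {
        byte[] uncompressed = "hello gzip".getBytes();
        CRC32 crc = new CRC32();
        crc.update(uncompressed);

        byte[] trailer = new byte[8];
        writeInt((int) crc.getValue(), trailer, 0);  // bytes 0-3: CRC-32 of the uncompressed data
        writeInt(uncompressed.length, trailer, 4);   // bytes 4-7: ISIZE, the uncompressed length

        StringBuilder hex = new StringBuilder();
        for (byte b : trailer) {
          hex.append(String.format("%02x ", b));
        }
        System.out.println(hex);
      }
    }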

+ 15 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -106,6 +106,8 @@ public class Client {
 
 
   private SocketFactory socketFactory;           // how to create sockets
   private SocketFactory socketFactory;           // how to create sockets
   private int refCount = 1;
   private int refCount = 1;
+
+  private final int connectionTimeout;
   
   
   final static int PING_CALL_ID = -1;
   final static int PING_CALL_ID = -1;
   
   
@@ -159,7 +161,16 @@ public class Client {
     }
     }
     return -1;
     return -1;
   }
   }
-  
+  /**
+   * Set the connection timeout value in the configuration.
+   *
+   * @param conf Configuration
+   * @param timeout the socket connect timeout value in milliseconds
+   */
+  public static final void setConnectTimeout(Configuration conf, int timeout) {
+    conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY, timeout);
+  }
+
   /**
   /**
    * Increment this client's reference count
    * Increment this client's reference count
    *
    *
@@ -494,8 +505,7 @@ public class Client {
             }
             }
           }
           }
           
           
-          // connection time out is 20s
-          NetUtils.connect(this.socket, server, 20000);
+          NetUtils.connect(this.socket, server, connectionTimeout);
           if (rpcTimeout > 0) {
           if (rpcTimeout > 0) {
             pingInterval = rpcTimeout;  // rpcTimeout overwrites pingInterval
             pingInterval = rpcTimeout;  // rpcTimeout overwrites pingInterval
           }
           }
@@ -1034,6 +1044,8 @@ public class Client {
     this.valueClass = valueClass;
     this.valueClass = valueClass;
     this.conf = conf;
     this.conf = conf;
     this.socketFactory = factory;
     this.socketFactory = factory;
+    this.connectionTimeout = conf.getInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY,
+        CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT);
   }
   }
 
 
   /**
   /**
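
With the hard-coded 20-second connect timeout gone, callers can tune ipc.client.connect.timeout through the new setter. A minimal usage sketch (the 60000 ms value is only an example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.Client;

    public class ConnectTimeoutDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Raise the IPC connect timeout from the 20000 ms default to one minute;
        // the value is stored under ipc.client.connect.timeout.
        Client.setConnectTimeout(conf, 60000);
        System.out.println(conf.get("ipc.client.connect.timeout"));  // prints 60000
      }
    }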

+ 20 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java

@@ -25,6 +25,7 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 import java.net.NetworkInterface;
 import java.net.NetworkInterface;
 import java.net.NoRouteToHostException;
 import java.net.NoRouteToHostException;
+import java.net.ServerSocket;
 import java.net.Socket;
 import java.net.Socket;
 import java.net.SocketAddress;
 import java.net.SocketAddress;
 import java.net.SocketException;
 import java.net.SocketException;
@@ -865,4 +866,23 @@ public class NetUtils {
     }
     }
     return addrs;
     return addrs;
   }
   }
+
+  /**
+   * Return a free port number. There is no guarantee it will remain free, so
+   * it should be used immediately.
+   *
+   * @return a free port for binding a local socket, or 0 if none could be obtained
+   */
+  public static int getFreeSocketPort() {
+    int port = 0;
+    try {
+      ServerSocket s = new ServerSocket(0);
+      port = s.getLocalPort();
+      s.close();
+      return port;
+    } catch (IOException e) {
+      // Could not get a free port. Return default port 0.
+    }
+    return port;
+  }
 }
 }
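
As the javadoc notes, the returned port is only known to have been free at probe time, so callers should bind it immediately. A hedged usage sketch:

    import java.io.IOException;
    import java.net.ServerSocket;

    import org.apache.hadoop.net.NetUtils;

    public class FreePortDemo {
      public static void main(String[] args) throws IOException {
        int port = NetUtils.getFreeSocketPort();
        if (port == 0) {
          throw new IOException("could not find a free port");
        }
        // Bind right away: the port was only known to be free at probe time.
        ServerSocket server = new ServerSocket(port);
        System.out.println("listening on " + server.getLocalPort());
        server.close();
      }
    }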

+ 27 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java

@@ -18,10 +18,13 @@
 
 
 package org.apache.hadoop.security;
 package org.apache.hadoop.security;
 
 
+import java.io.BufferedInputStream;
 import java.io.DataInput;
 import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collection;
@@ -148,8 +151,32 @@ public class Credentials implements Writable {
       in.close();
       in.close();
       return credentials;
       return credentials;
     } catch(IOException ioe) {
     } catch(IOException ioe) {
+      throw new IOException("Exception reading " + filename, ioe);
+    } finally {
       IOUtils.cleanup(LOG, in);
       IOUtils.cleanup(LOG, in);
+    }
+  }
+
+  /**
+   * Convenience method for reading a token storage file and returning
+   * the Credentials it contains.
+   * @param filename the token storage file to read
+   * @param conf configuration
+   * @throws IOException if the file cannot be read
+   */
+  public static Credentials readTokenStorageFile(File filename, Configuration conf)
+      throws IOException {
+    DataInputStream in = null;
+    Credentials credentials = new Credentials();
+    try {
+      in = new DataInputStream(new BufferedInputStream(
+          new FileInputStream(filename)));
+      credentials.readTokenStorageStream(in);
+      return credentials;
+    } catch(IOException ioe) {
       throw new IOException("Exception reading " + filename, ioe);
       throw new IOException("Exception reading " + filename, ioe);
+    } finally {
+      IOUtils.cleanup(LOG, in);
     }
     }
   }
   }
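
The new File-based overload avoids going through the FileSystem layer entirely. A minimal usage sketch; the file path is purely illustrative (in practice it comes from HADOOP_TOKEN_FILE_LOCATION), and numberOfTokens() is assumed to be the existing Credentials accessor:

    import java.io.File;
    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.Credentials;

    public class ReadTokenFileDemo {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Illustrative path only.
        File tokenFile = new File("/tmp/hadoop-tokens.bin");
        Credentials creds = Credentials.readTokenStorageFile(tokenFile, conf);
        System.out.println("loaded " + creds.numberOfTokens() + " tokens");
      }
    }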
   
   

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.security;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
 
 
+import java.io.File;
 import java.io.IOException;
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.security.AccessControlContext;
 import java.security.AccessControlContext;
@@ -656,10 +657,11 @@ public class UserGroupInformation {
 
 
         String fileLocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
         String fileLocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
         if (fileLocation != null) {
         if (fileLocation != null) {
-          // load the token storage file and put all of the tokens into the
-          // user.
+          // Load the token storage file and put all of the tokens into the
+          // user. Don't use the FileSystem API for reading since it has a lock
+          // cycle (HADOOP-9212).
           Credentials cred = Credentials.readTokenStorageFile(
           Credentials cred = Credentials.readTokenStorageFile(
-              new Path("file:///" + fileLocation), conf);
+              new File(fileLocation), conf);
           loginUser.addCredentials(cred);
           loginUser.addCredentials(cred);
         }
         }
         loginUser.spawnAutoRenewalThreadForUserCreds();
         loginUser.spawnAutoRenewalThreadForUserCreds();

+ 31 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdGenerator.java

@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Generic ID generator
+ * used for generating various types of number sequences.
+ */
+@InterfaceAudience.Private
+public interface IdGenerator {
+
+  /** Increment and then return the next value. */
+  public long nextValue();
+}
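
A hypothetical minimal implementation of the new interface, shown only to illustrate the contract; the real in-tree implementation is SequentialNumber in the next hunk:

    import java.util.concurrent.atomic.AtomicLong;

    import org.apache.hadoop.util.IdGenerator;

    public class CountingIdGenerator implements IdGenerator {
      private final AtomicLong current = new AtomicLong(0);

      @Override
      public long nextValue() {
        // Increment and then return, as the interface javadoc specifies.
        return current.incrementAndGet();
      }
    }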

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SequentialNumber.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
  * This class is thread safe.
  * This class is thread safe.
  */
  */
 @InterfaceAudience.Private
 @InterfaceAudience.Private
-public abstract class SequentialNumber {
+public abstract class SequentialNumber implements IdGenerator {
   private final AtomicLong currentValue;
   private final AtomicLong currentValue;
 
 
   /** Create a new instance with the given initial value. */
   /** Create a new instance with the given initial value. */

+ 57 - 0
hadoop-common-project/hadoop-common/src/main/proto/Security.proto

@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.security.proto";
+option java_outer_classname = "SecurityProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.common;
+
+/**
+ * Security token identifier
+ */
+message TokenProto {
+  required bytes identifier = 1;
+  required bytes password = 2;
+  required string kind = 3;
+  required string service = 4;
+}
+
+message GetDelegationTokenRequestProto {
+  required string renewer = 1;
+}
+
+message GetDelegationTokenResponseProto {
+  optional hadoop.common.TokenProto token = 1;
+}
+
+message RenewDelegationTokenRequestProto {
+  required hadoop.common.TokenProto token = 1;
+}
+
+message RenewDelegationTokenResponseProto {
+  required uint64 newExpiryTime = 1;
+}
+
+message CancelDelegationTokenRequestProto {
+  required hadoop.common.TokenProto token = 1;
+}
+
+message CancelDelegationTokenResponseProto { // void response
+}
+
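
Assuming the standard protobuf-java code generation implied by the options above (outer class SecurityProtos in org.apache.hadoop.security.proto), a token message could be built like this; the kind and service values are placeholders:

    import com.google.protobuf.ByteString;

    import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;

    public class TokenProtoDemo {
      public static void main(String[] args) {
        TokenProto token = TokenProto.newBuilder()
            .setIdentifier(ByteString.copyFromUtf8("identifier-bytes"))  // placeholder contents
            .setPassword(ByteString.copyFromUtf8("password-bytes"))
            .setKind("EXAMPLE_TOKEN_KIND")
            .setService("127.0.0.1:8020")
            .build();
        System.out.println(token.getKind() + " for " + token.getService());
      }
    }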

+ 13 - 0
hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo

@@ -1 +1,14 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
 org.apache.hadoop.security.AnnotatedSecurityInfo
 org.apache.hadoop.security.AnnotatedSecurityInfo

+ 18 - 2
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -80,9 +80,17 @@
 
 
 <property>
 <property>
   <name>hadoop.security.group.mapping</name>
   <name>hadoop.security.group.mapping</name>
-  <value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
+  <value>org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback</value>
   <description>
   <description>
-    Class for user to group mapping (get groups for a given user) for ACL
+    Class for user to group mapping (get groups for a given user) for ACL.
+    The default implementation,
+    org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback,
+    determines whether the Java Native Interface (JNI) is available. If JNI
+    is available, the implementation uses the API within Hadoop to resolve a
+    list of groups for a user. If JNI is not available, the shell
+    implementation, ShellBasedUnixGroupsMapping, is used. This implementation
+    shells out to the Linux/Unix environment with the
+    <code>bash -c groups</code> command to resolve a list of groups for a user.
   </description>
   </description>
 </property>
 </property>
 
 
@@ -565,6 +573,14 @@
   </description>
   </description>
 </property>
 </property>
 
 
+<property>
+  <name>ipc.client.connect.timeout</name>
+  <value>20000</value>
+  <description>Indicates the number of milliseconds a client will wait for
+               the socket to establish a connection to the server.
+  </description>
+</property>
+
 <property>
 <property>
   <name>ipc.client.connect.max.retries.on.timeouts</name>
   <name>ipc.client.connect.max.retries.on.timeouts</name>
   <value>45</value>
   <value>45</value>
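
Values in core-default.xml are only defaults; entries in core-site.xml, or programmatic overrides, take precedence. A small sketch of overriding the two settings touched in this file (the override values are arbitrary):

    import org.apache.hadoop.conf.Configuration;

    public class CoreSiteOverrideDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Shorten the IPC connect timeout and fall back to the shell-based group mapping.
        conf.setInt("ipc.client.connect.timeout", 5000);
        conf.set("hadoop.security.group.mapping",
            "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");
        System.out.println(conf.getInt("ipc.client.connect.timeout", 20000));  // prints 5000
      }
    }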

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java

@@ -95,7 +95,7 @@ public abstract class FileContextPermissionBase {
     String filename = "foo";
     String filename = "foo";
     Path f = getTestRootPath(fc, filename);
     Path f = getTestRootPath(fc, filename);
     createFile(fc, filename);
     createFile(fc, filename);
-    doFilePermissionCheck(FileContext.DEFAULT_PERM.applyUMask(fc.getUMask()),
+    doFilePermissionCheck(FileContext.FILE_DEFAULT_PERM.applyUMask(fc.getUMask()),
                         fc.getFileStatus(f).getPermission());
                         fc.getFileStatus(f).getPermission());
   }
   }
   
   

+ 158 - 39
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java

@@ -23,12 +23,9 @@ import java.io.IOException;
 
 
 import junit.framework.TestCase;
 import junit.framework.TestCase;
 
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 
 
 /**
 /**
@@ -45,15 +42,13 @@ import org.apache.hadoop.fs.permission.FsPermission;
  * </p>
  * </p>
  */
  */
 public abstract class FileSystemContractBaseTest extends TestCase {
 public abstract class FileSystemContractBaseTest extends TestCase {
+  private static final Log LOG =
+    LogFactory.getLog(FileSystemContractBaseTest.class);
+
   protected final static String TEST_UMASK = "062";
   protected final static String TEST_UMASK = "062";
   protected FileSystem fs;
   protected FileSystem fs;
-  protected byte[] data = new byte[getBlockSize() * 2]; // two blocks of data
-  {
-    for (int i = 0; i < data.length; i++) {
-      data[i] = (byte) (i % 10);
-    }
-  }
-  
+  protected byte[] data = dataset(getBlockSize() * 2, 0, 255);
+
   @Override
   @Override
   protected void tearDown() throws Exception {
   protected void tearDown() throws Exception {
     fs.delete(path("/test"), true);
     fs.delete(path("/test"), true);
@@ -235,35 +230,16 @@ public abstract class FileSystemContractBaseTest extends TestCase {
   public void testWriteReadAndDeleteTwoBlocks() throws Exception {
   public void testWriteReadAndDeleteTwoBlocks() throws Exception {
     writeReadAndDelete(getBlockSize() * 2);
     writeReadAndDelete(getBlockSize() * 2);
   }
   }
-  
+
+  /**
+   * Write a dataset, read it back in and verify that they match.
+   * Afterwards, the file is deleted.
+   * @param len length of data
+   * @throws IOException on IO failures
+   */
   protected void writeReadAndDelete(int len) throws IOException {
   protected void writeReadAndDelete(int len) throws IOException {
     Path path = path("/test/hadoop/file");
     Path path = path("/test/hadoop/file");
-    
-    fs.mkdirs(path.getParent());
-
-    FSDataOutputStream out = fs.create(path, false,
-        fs.getConf().getInt("io.file.buffer.size", 4096), 
-        (short) 1, getBlockSize());
-    out.write(data, 0, len);
-    out.close();
-
-    assertTrue("Exists", fs.exists(path));
-    assertEquals("Length", len, fs.getFileStatus(path).getLen());
-
-    FSDataInputStream in = fs.open(path);
-    byte[] buf = new byte[len];
-    in.readFully(0, buf);
-    in.close();
-
-    assertEquals(len, buf.length);
-    for (int i = 0; i < buf.length; i++) {
-      assertEquals("Position " + i, data[i], buf[i]);
-    }
-    
-    assertTrue("Deleted", fs.delete(path, false));
-    
-    assertFalse("No longer exists", fs.exists(path));
-
+    writeAndRead(path, data, len, false, true);
   }
   }
   
   
   public void testOverwrite() throws IOException {
   public void testOverwrite() throws IOException {
@@ -494,4 +470,147 @@ public abstract class FileSystemContractBaseTest extends TestCase {
     assertEquals("Source exists", srcExists, fs.exists(src));
     assertEquals("Source exists", srcExists, fs.exists(src));
     assertEquals("Destination exists", dstExists, fs.exists(dst));
     assertEquals("Destination exists", dstExists, fs.exists(dst));
   }
   }
+
+  /**
+   * Verify that if you take an existing file and overwrite it, the new values
+   * get picked up.
+   * This is a test for the behavior of eventually consistent
+   * filesystems.
+   *
+   * @throws Exception on any failure
+   */
+
+  public void testOverWriteAndRead() throws Exception {
+    int blockSize = getBlockSize();
+
+    byte[] filedata1 = dataset(blockSize * 2, 'A', 26);
+    byte[] filedata2 = dataset(blockSize * 2, 'a', 26);
+    Path path = path("/test/hadoop/file-overwrite");
+    writeAndRead(path, filedata1, blockSize, true, false);
+    writeAndRead(path, filedata2, blockSize, true, false);
+    writeAndRead(path, filedata1, blockSize * 2, true, false);
+    writeAndRead(path, filedata2, blockSize * 2, true, false);
+    writeAndRead(path, filedata1, blockSize, true, false);
+    writeAndRead(path, filedata2, blockSize * 2, true, false);
+  }
+
+  /**
+   *
+   * Write a file and read it in, validating the result. Optional flags control
+   * whether file overwrite operations should be enabled, and whether the
+   * file should be deleted afterwards.
+   *
+   * If there is a mismatch between what was written and what was expected,
+   * a small range of bytes on either side of the first error is logged to aid
+   * in diagnosing the problem - whether it came from a previous file
+   * or from corruption of the current file. This assumes that two
+   * sequential runs to the same path use datasets with different character
+   * moduli.
+   *
+   * @param path path to write to
+   * @param src source dataset to write from
+   * @param len length of data
+   * @param overwrite should the create option allow overwrites?
+   * @param delete should the file be deleted afterwards? - with a verification
+   * that it worked. Deletion is not attempted if an assertion has failed
+   * earlier; it is not in a <code>finally{}</code> block.
+   * @throws IOException IO problems
+   */
+  protected void writeAndRead(Path path, byte[] src, int len,
+                              boolean overwrite,
+                              boolean delete) throws IOException {
+    assertTrue("Not enough data in source array to write " + len + " bytes",
+               src.length >= len);
+    fs.mkdirs(path.getParent());
+
+    FSDataOutputStream out = fs.create(path, overwrite,
+                                       fs.getConf().getInt("io.file.buffer.size",
+                                                           4096),
+                                       (short) 1, getBlockSize());
+    out.write(src, 0, len);
+    out.close();
+
+    assertTrue("Exists", fs.exists(path));
+    assertEquals("Length", len, fs.getFileStatus(path).getLen());
+
+    FSDataInputStream in = fs.open(path);
+    byte[] buf = new byte[len];
+    in.readFully(0, buf);
+    in.close();
+
+    assertEquals(len, buf.length);
+    int errors = 0;
+    int first_error_byte = -1;
+    for (int i = 0; i < len; i++) {
+      if (src[i] != buf[i]) {
+        if (errors == 0) {
+          first_error_byte = i;
+        }
+        errors++;
+      }
+    }
+
+    if (errors > 0) {
+      String message = String.format(" %d errors in file of length %d",
+                                     errors, len);
+      LOG.warn(message);
+      // the range either side of the first error to print
+      // this is a purely arbitrary number, to aid user debugging
+      final int overlap = 10;
+      for (int i = Math.max(0, first_error_byte - overlap);
+           i < Math.min(first_error_byte + overlap, len);
+           i++) {
+        byte actual = buf[i];
+        byte expected = src[i];
+        String letter = toChar(actual);
+        String line = String.format("[%04d] %2x %s\n", i, actual, letter);
+        if (expected != actual) {
+          line = String.format("[%04d] %2x %s -expected %2x %s\n",
+                               i,
+                               actual,
+                               letter,
+                               expected,
+                               toChar(expected));
+        }
+        LOG.warn(line);
+      }
+      fail(message);
+    }
+
+    if (delete) {
+      boolean deleted = fs.delete(path, false);
+      assertTrue("Deleted", deleted);
+      assertFalse("No longer exists", fs.exists(path));
+    }
+  }
+
+  /**
+   * Convert a byte to a character for printing. If the
+   * byte value is below 0x20 - and hence unprintable - it is
+   * returned as a two-digit hex value.
+   * @param b byte
+   * @return the printable character string
+   */
+  protected String toChar(byte b) {
+    if (b >= 0x20) {
+      return Character.toString((char) b);
+    } else {
+      return String.format("%02x", b);
+    }
+  }
+
+  /**
+   * Create a dataset for use in the tests; all data is in the range
+   * base to (base+modulo-1) inclusive
+   * @param len length of data
+   * @param base base of the data
+   * @param modulo the modulo
+   * @return the newly generated dataset
+   */
+  protected byte[] dataset(int len, int base, int modulo) {
+    byte[] dataset = new byte[len];
+    for (int i = 0; i < len; i++) {
+      dataset[i] = (byte) (base + (i % modulo));
+    }
+    return dataset;
+  }
 }
 }
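
The dataset() helper above generates bytes that cycle through base..base+modulo-1, so two datasets built with different bases (for example 'A' versus 'a') are distinguishable byte for byte, which is what testOverWriteAndRead() relies on. A stand-alone copy of the helper to show the idea:

    public class DatasetDemo {
      // Stand-alone copy of the dataset() helper shown above.
      static byte[] dataset(int len, int base, int modulo) {
        byte[] dataset = new byte[len];
        for (int i = 0; i < len; i++) {
          dataset[i] = (byte) (base + (i % modulo));
        }
        return dataset;
      }

      public static void main(String[] args) {
        byte[] d = dataset(8, 'A', 26);
        StringBuilder sb = new StringBuilder();
        for (byte b : d) {
          sb.append((char) b).append(' ');
        }
        System.out.println(sb);  // prints: A B C D E F G H
      }
    }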

+ 17 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java

@@ -1,3 +1,20 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 package org.apache.hadoop.fs;
 package org.apache.hadoop.fs;
 
 
 import java.io.FileNotFoundException;
 import java.io.FileNotFoundException;

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java

@@ -121,7 +121,7 @@ public class TestFileStatus {
     FileStatus fileStatus = new FileStatus(LENGTH, isdir,
     FileStatus fileStatus = new FileStatus(LENGTH, isdir,
         REPLICATION, BLKSIZE, MTIME, PATH);   
         REPLICATION, BLKSIZE, MTIME, PATH);   
     validateAccessors(fileStatus, LENGTH, isdir, REPLICATION, BLKSIZE, MTIME,
     validateAccessors(fileStatus, LENGTH, isdir, REPLICATION, BLKSIZE, MTIME,
-        0, FsPermission.getDefault(), "", "", null, PATH);
+        0, FsPermission.getDirDefault(), "", "", null, PATH);
   }
   }
 
 
   /**
   /**
@@ -131,7 +131,7 @@ public class TestFileStatus {
   public void constructorBlank() throws IOException {
   public void constructorBlank() throws IOException {
     FileStatus fileStatus = new FileStatus();  
     FileStatus fileStatus = new FileStatus();  
     validateAccessors(fileStatus, 0, false, 0, 0, 0,
     validateAccessors(fileStatus, 0, false, 0, 0, 0,
-        0, FsPermission.getDefault(), "", "", null, null);
+        0, FsPermission.getFileDefault(), "", "", null, null);
   }
   }
 
 
   /**
   /**

+ 100 - 31
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java

@@ -17,6 +17,7 @@
  */
  */
 package org.apache.hadoop.fs;
 package org.apache.hadoop.fs;
 
 
+import org.junit.Before;
 import java.io.BufferedReader;
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileInputStream;
@@ -176,12 +177,26 @@ public class TestFileUtil {
       //Expected an IOException
       //Expected an IOException
     }
     }
   }
   }
+
+  @Before
+  public void before() throws IOException {
+    cleanupImpl();
+  }
   
   
   @After
   @After
   public void tearDown() throws IOException {
   public void tearDown() throws IOException {
-    FileUtil.fullyDelete(del);
-    FileUtil.fullyDelete(tmp);
-    FileUtil.fullyDelete(partitioned);
+    cleanupImpl();
+  }
+  
+  private void cleanupImpl() throws IOException  {
+    FileUtil.fullyDelete(del, true);
+    Assert.assertTrue(!del.exists());
+    
+    FileUtil.fullyDelete(tmp, true);
+    Assert.assertTrue(!tmp.exists());
+    
+    FileUtil.fullyDelete(partitioned, true);
+    Assert.assertTrue(!partitioned.exists());
   }
   }
 
 
   @Test
   @Test
@@ -272,12 +287,14 @@ public class TestFileUtil {
     Assert.assertTrue(new File(tmp, FILE).exists());
     Assert.assertTrue(new File(tmp, FILE).exists());
   }
   }
 
 
-  private File xSubDir = new File(del, "xsubdir");
-  private File ySubDir = new File(del, "ysubdir");
-  static String file1Name = "file1";
-  private File file2 = new File(xSubDir, "file2");
-  private File file3 = new File(ySubDir, "file3");
-  private File zlink = new File(del, "zlink");
+  private final File xSubDir = new File(del, "xSubDir");
+  private final File xSubSubDir = new File(xSubDir, "xSubSubDir");
+  private final File ySubDir = new File(del, "ySubDir");
+  private static final String file1Name = "file1";
+  private final File file2 = new File(xSubDir, "file2");
+  private final File file22 = new File(xSubSubDir, "file22");
+  private final File file3 = new File(ySubDir, "file3");
+  private final File zlink = new File(del, "zlink");
   
   
   /**
   /**
    * Creates a directory which can not be deleted completely.
    * Creates a directory which can not be deleted completely.
@@ -289,10 +306,14 @@ public class TestFileUtil {
    *                       |
    *                       |
    *    .---------------------------------------,
    *    .---------------------------------------,
    *    |            |              |           |
    *    |            |              |           |
-   *  file1(!w)   xsubdir(-w)   ysubdir(+w)   zlink
-   *                 |              |
-   *               file2          file3
-   *
+   *  file1(!w)   xSubDir(-rwx)   ySubDir(+w)   zlink
+   *              |  |              |
+   *              | file2(-rwx)   file3
+   *              |
+   *            xSubSubDir(-rwx) 
+   *              |
+   *             file22(-rwx)
+   *             
    * @throws IOException
    * @throws IOException
    */
    */
   private void setupDirsAndNonWritablePermissions() throws IOException {
   private void setupDirsAndNonWritablePermissions() throws IOException {
@@ -305,7 +326,16 @@ public class TestFileUtil {
 
 
     xSubDir.mkdirs();
     xSubDir.mkdirs();
     file2.createNewFile();
     file2.createNewFile();
-    xSubDir.setWritable(false);
+    
+    xSubSubDir.mkdirs();
+    file22.createNewFile();
+    
+    revokePermissions(file22);
+    revokePermissions(xSubSubDir);
+    
+    revokePermissions(file2);
+    revokePermissions(xSubDir);
+    
     ySubDir.mkdirs();
     ySubDir.mkdirs();
     file3.createNewFile();
     file3.createNewFile();
 
 
@@ -317,23 +347,43 @@ public class TestFileUtil {
     FileUtil.symLink(tmpFile.toString(), zlink.toString());
     FileUtil.symLink(tmpFile.toString(), zlink.toString());
   }
   }
   
   
+  private static void grantPermissions(final File f) {
+    f.setReadable(true);
+    f.setWritable(true);
+    f.setExecutable(true);
+  }
+  
+  private static void revokePermissions(final File f) {
+    f.setWritable(false);
+    f.setExecutable(false);
+    f.setReadable(false);
+  }
+  
   // Validates the return value.
   // Validates the return value.
-  // Validates the existence of directory "xsubdir" and the file "file1"
-  // Sets writable permissions for the non-deleted dir "xsubdir" so that it can
-  // be deleted in tearDown().
-  private void validateAndSetWritablePermissions(boolean ret) {
-    xSubDir.setWritable(true);
-    Assert.assertFalse("The return value should have been false!", ret);
-    Assert.assertTrue("The file file1 should not have been deleted!",
+  // Validates the existence of the file "file1"
+  private void validateAndSetWritablePermissions(
+      final boolean expectedRevokedPermissionDirsExist, final boolean ret) {
+    grantPermissions(xSubDir);
+    grantPermissions(xSubSubDir);
+    
+    Assert.assertFalse("The return value should have been false.", ret);
+    Assert.assertTrue("The file file1 should not have been deleted.",
         new File(del, file1Name).exists());
         new File(del, file1Name).exists());
-    Assert.assertTrue(
-        "The directory xsubdir should not have been deleted!",
-        xSubDir.exists());
-    Assert.assertTrue("The file file2 should not have been deleted!",
-        file2.exists());
-    Assert.assertFalse("The directory ysubdir should have been deleted!",
+    
+    Assert.assertEquals(
+        "The directory xSubDir *should* not have been deleted.",
+        expectedRevokedPermissionDirsExist, xSubDir.exists());
+    Assert.assertEquals("The file file2 *should* not have been deleted.",
+        expectedRevokedPermissionDirsExist, file2.exists());
+    Assert.assertEquals(
+        "The directory xSubSubDir *should* not have been deleted.",
+        expectedRevokedPermissionDirsExist, xSubSubDir.exists());
+    Assert.assertEquals("The file file22 *should* not have been deleted.",
+        expectedRevokedPermissionDirsExist, file22.exists());
+    
+    Assert.assertFalse("The directory ySubDir should have been deleted.",
         ySubDir.exists());
         ySubDir.exists());
-    Assert.assertFalse("The link zlink should have been deleted!",
+    Assert.assertFalse("The link zlink should have been deleted.",
         zlink.exists());
         zlink.exists());
   }
   }
 
 
@@ -346,7 +396,15 @@ public class TestFileUtil {
     LOG.info("Running test to verify failure of fullyDelete()");
     LOG.info("Running test to verify failure of fullyDelete()");
     setupDirsAndNonWritablePermissions();
     setupDirsAndNonWritablePermissions();
     boolean ret = FileUtil.fullyDelete(new MyFile(del));
     boolean ret = FileUtil.fullyDelete(new MyFile(del));
-    validateAndSetWritablePermissions(ret);
+    validateAndSetWritablePermissions(true, ret);
+  }
+  
+  @Test
+  public void testFailFullyDeleteGrantPermissions() throws IOException {
+    setupDirsAndNonWritablePermissions();
+    boolean ret = FileUtil.fullyDelete(new MyFile(del), true);
+    // this time the directories with revoked permissions *should* be deleted:
+    validateAndSetWritablePermissions(false, ret);
   }
   }
 
 
   /**
   /**
@@ -395,7 +453,10 @@ public class TestFileUtil {
      */
      */
     @Override
     @Override
     public File[] listFiles() {
     public File[] listFiles() {
-      File[] files = super.listFiles();
+      final File[] files = super.listFiles();
+      if (files == null) {
+         return null;
+      }
       List<File> filesList = Arrays.asList(files);
       List<File> filesList = Arrays.asList(files);
       Collections.sort(filesList);
       Collections.sort(filesList);
       File[] myFiles = new MyFile[files.length];
       File[] myFiles = new MyFile[files.length];
@@ -416,9 +477,17 @@ public class TestFileUtil {
     LOG.info("Running test to verify failure of fullyDeleteContents()");
     LOG.info("Running test to verify failure of fullyDeleteContents()");
     setupDirsAndNonWritablePermissions();
     setupDirsAndNonWritablePermissions();
     boolean ret = FileUtil.fullyDeleteContents(new MyFile(del));
     boolean ret = FileUtil.fullyDeleteContents(new MyFile(del));
-    validateAndSetWritablePermissions(ret);
+    validateAndSetWritablePermissions(true, ret);
   }
   }
 
 
+  @Test
+  public void testFailFullyDeleteContentsGrantPermissions() throws IOException {
+    setupDirsAndNonWritablePermissions();
+    boolean ret = FileUtil.fullyDeleteContents(new MyFile(del), true);
+    // this time the directories with revoked permissions *should* be deleted:
+    validateAndSetWritablePermissions(false, ret);
+  }
+  
   @Test
   @Test
   public void testCopyMergeSingleDirectory() throws IOException {
   public void testCopyMergeSingleDirectory() throws IOException {
     setupDirs();
     setupDirs();
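
The new boolean argument asks FileUtil.fullyDelete to grant itself permissions before deleting, which is what makes the revoked-permission directories removable in the second pair of tests above. A hedged usage sketch (the directory path is illustrative only):

    import java.io.File;

    import org.apache.hadoop.fs.FileUtil;

    public class ForceDeleteDemo {
      public static void main(String[] args) {
        File dir = new File("/tmp/force-delete-demo");   // illustrative path only
        File sub = new File(dir, "sub");
        sub.mkdirs();
        sub.setWritable(false);                          // simulate revoked permissions

        // fullyDelete(dir) alone would report failure here; the second argument asks
        // FileUtil to grant itself permissions first, as the new tests exercise.
        boolean deleted = FileUtil.fullyDelete(dir, true);
        System.out.println("deleted = " + deleted);
      }
    }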

+ 12 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextMainOperations.java

@@ -24,6 +24,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.junit.Assert;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.Test;
+import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.permission.FsPermission;
 
 
 public class TestLocalFSFileContextMainOperations extends FileContextMainOperationsBaseTest {
 public class TestLocalFSFileContextMainOperations extends FileContextMainOperationsBaseTest {
 
 
@@ -47,4 +49,14 @@ public class TestLocalFSFileContextMainOperations extends FileContextMainOperati
     FileContext fc1 = FileContext.getLocalFSFileContext();
     FileContext fc1 = FileContext.getLocalFSFileContext();
     Assert.assertTrue(fc1 != fc);
     Assert.assertTrue(fc1 != fc);
   }
   }
+
+  @Test
+  public void testDefaultFilePermission() throws IOException {
+    Path file = FileContextTestHelper.getTestRootPath(fc,
+        "testDefaultFilePermission");
+    FileContextTestHelper.createFile(fc, file);
+    FsPermission expect = FileContext.FILE_DEFAULT_PERM.applyUMask(fc.getUMask());
+    Assert.assertEquals(expect, fc.getFileStatus(file)
+        .getPermission());
+  }
 }
 }

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java

@@ -73,7 +73,7 @@ public class TestLocalFileSystemPermission extends TestCase {
     try {
     try {
       FsPermission initialPermission = getPermission(localfs, f);
       FsPermission initialPermission = getPermission(localfs, f);
       System.out.println(filename + ": " + initialPermission);
       System.out.println(filename + ": " + initialPermission);
-      assertEquals(FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf)), initialPermission);
+      assertEquals(FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), initialPermission);
     }
     }
     catch(Exception e) {
     catch(Exception e) {
       System.out.println(StringUtils.stringifyException(e));
       System.out.println(StringUtils.stringifyException(e));

+ 29 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -119,6 +119,18 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     }    
     }    
   }
   }
 
 
+  @SuppressWarnings("serial")
+  public static class LongHeaderServlet extends HttpServlet {
+    @SuppressWarnings("unchecked")
+    @Override
+    public void doGet(HttpServletRequest request,
+                      HttpServletResponse response
+    ) throws ServletException, IOException {
+      Assert.assertEquals(63 * 1024, request.getHeader("longheader").length());
+      response.setStatus(HttpServletResponse.SC_OK);
+    }
+  }
+
   @SuppressWarnings("serial")
   @SuppressWarnings("serial")
   public static class HtmlContentServlet extends HttpServlet {
   public static class HtmlContentServlet extends HttpServlet {
     @Override
     @Override
@@ -139,6 +151,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     server.addServlet("echo", "/echo", EchoServlet.class);
     server.addServlet("echo", "/echo", EchoServlet.class);
     server.addServlet("echomap", "/echomap", EchoMapServlet.class);
     server.addServlet("echomap", "/echomap", EchoMapServlet.class);
     server.addServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class);
     server.addServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class);
+    server.addServlet("longheader", "/longheader", LongHeaderServlet.class);
     server.addJerseyResourcePackage(
     server.addJerseyResourcePackage(
         JerseyResource.class.getPackage().getName(), "/jersey/*");
         JerseyResource.class.getPackage().getName(), "/jersey/*");
     server.start();
     server.start();
@@ -197,6 +210,22 @@ public class TestHttpServer extends HttpServerFunctionalTest {
                  readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>")));
                  readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>")));
   }
   }
 
 
+  /** 
+   *  Test that verifies headers can be up to 64K long. 
+   *  The test adds a 63K header leaving 1K for other headers.
+   *  This is because the header buffer setting is for ALL headers,
+   *  names and values included. */
+  @Test public void testLongHeader() throws Exception {
+    URL url = new URL(baseUrl, "/longheader");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0 ; i < 63 * 1024; i++) {
+      sb.append("a");
+    }
+    conn.setRequestProperty("longheader", sb.toString());
+    assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+  }
+
   @Test public void testContentTypes() throws Exception {
   @Test public void testContentTypes() throws Exception {
     // Static CSS files should have text/css
     // Static CSS files should have text/css
     URL cssUrl = new URL(baseUrl, "/static/test.css");
     URL cssUrl = new URL(baseUrl, "/static/test.css");

+ 12 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java

@@ -256,5 +256,17 @@ public class TestCodecFactory extends TestCase {
     checkCodec("overridden factory for .gz", NewGzipCodec.class, codec);
     checkCodec("overridden factory for .gz", NewGzipCodec.class, codec);
     codec = factory.getCodecByClassName(NewGzipCodec.class.getCanonicalName());
     codec = factory.getCodecByClassName(NewGzipCodec.class.getCanonicalName());
     checkCodec("overridden factory for gzip codec", NewGzipCodec.class, codec);
     checkCodec("overridden factory for gzip codec", NewGzipCodec.class, codec);
+    
+    Configuration conf = new Configuration();
+    conf.set("io.compression.codecs", 
+        "   org.apache.hadoop.io.compress.GzipCodec   , " +
+        "    org.apache.hadoop.io.compress.DefaultCodec  , " +
+        " org.apache.hadoop.io.compress.BZip2Codec   ");
+    try {
+      CompressionCodecFactory.getCodecClasses(conf);
+    } catch (IllegalArgumentException e) {
+      fail("IllegalArgumentException is unexpected");
+    }
+
   }
   }
 }
 }

+ 161 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java

@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.compress;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.RandomDatum;
+import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel;
+import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
+import org.apache.hadoop.util.ReflectionUtils;
+
+import junit.framework.TestCase;
+
+public class TestCompressionStreamReuse extends TestCase {
+  private static final Log LOG = LogFactory
+      .getLog(TestCompressionStreamReuse.class);
+
+  private Configuration conf = new Configuration();
+  private int count = 10000;
+  private int seed = new Random().nextInt();
+
+  public void testBZip2Codec() throws IOException {
+    resetStateTest(conf, seed, count,
+        "org.apache.hadoop.io.compress.BZip2Codec");
+  }
+
+  public void testGzipCompressStreamReuse() throws IOException {
+    resetStateTest(conf, seed, count,
+        "org.apache.hadoop.io.compress.GzipCodec");
+  }
+
+  public void testGzipCompressStreamReuseWithParam() throws IOException {
+    Configuration conf = new Configuration(this.conf);
+    ZlibFactory
+        .setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION);
+    ZlibFactory.setCompressionStrategy(conf,
+        CompressionStrategy.HUFFMAN_ONLY);
+    resetStateTest(conf, seed, count,
+        "org.apache.hadoop.io.compress.GzipCodec");
+  }
+
+  private static void resetStateTest(Configuration conf, int seed, int count,
+      String codecClass) throws IOException {
+    // Create the codec
+    CompressionCodec codec = null;
+    try {
+      codec = (CompressionCodec) ReflectionUtils.newInstance(conf
+          .getClassByName(codecClass), conf);
+    } catch (ClassNotFoundException cnfe) {
+      throw new IOException("Illegal codec!");
+    }
+    LOG.info("Created a Codec object of type: " + codecClass);
+
+    // Generate data
+    DataOutputBuffer data = new DataOutputBuffer();
+    RandomDatum.Generator generator = new RandomDatum.Generator(seed);
+    for (int i = 0; i < count; ++i) {
+      generator.next();
+      RandomDatum key = generator.getKey();
+      RandomDatum value = generator.getValue();
+
+      key.write(data);
+      value.write(data);
+    }
+    LOG.info("Generated " + count + " records");
+
+    // Compress data
+    DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
+    DataOutputStream deflateOut = new DataOutputStream(
+        new BufferedOutputStream(compressedDataBuffer));
+    CompressionOutputStream deflateFilter = codec
+        .createOutputStream(deflateOut);
+    deflateFilter.write(data.getData(), 0, data.getLength());
+    deflateFilter.finish();
+    deflateFilter.flush();
+    LOG.info("Finished compressing data");
+
+    // reset the deflater
+    deflateFilter.resetState();
+    LOG.info("Finished resetting the deflater");
+
+    // re-generate data
+    data.reset();
+    generator = new RandomDatum.Generator(seed);
+    for (int i = 0; i < count; ++i) {
+      generator.next();
+      RandomDatum key = generator.getKey();
+      RandomDatum value = generator.getValue();
+
+      key.write(data);
+      value.write(data);
+    }
+    DataInputBuffer originalData = new DataInputBuffer();
+    DataInputStream originalIn = new DataInputStream(
+        new BufferedInputStream(originalData));
+    originalData.reset(data.getData(), 0, data.getLength());
+
+    // re-compress data
+    compressedDataBuffer.reset();
+    deflateOut = new DataOutputStream(new BufferedOutputStream(
+        compressedDataBuffer));
+    deflateFilter = codec.createOutputStream(deflateOut);
+
+    deflateFilter.write(data.getData(), 0, data.getLength());
+    deflateFilter.finish();
+    deflateFilter.flush();
+    LOG.info("Finished re-compressing data");
+
+    // De-compress data
+    DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
+    deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
+        compressedDataBuffer.getLength());
+    CompressionInputStream inflateFilter = codec
+        .createInputStream(deCompressedDataBuffer);
+    DataInputStream inflateIn = new DataInputStream(
+        new BufferedInputStream(inflateFilter));
+
+    // Check
+    for (int i = 0; i < count; ++i) {
+      RandomDatum k1 = new RandomDatum();
+      RandomDatum v1 = new RandomDatum();
+      k1.readFields(originalIn);
+      v1.readFields(originalIn);
+
+      RandomDatum k2 = new RandomDatum();
+      RandomDatum v2 = new RandomDatum();
+      k2.readFields(inflateIn);
+      v2.readFields(inflateIn);
+      assertTrue(
+          "original and compressed-then-decompressed-output not equal",
+          k1.equals(k2) && v1.equals(v2));
+    }
+    LOG.info("SUCCESS! Completed checking " + count + " records");
+  }
+}
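
The test above exercises CompressionOutputStream#resetState(), which re-arms a compressor so the same stream object can produce a second, independent compressed stream without being re-created. A minimal sketch of that reuse pattern outside the test harness, assuming GzipCodec configured much as in the test, might look like:

    import java.io.ByteArrayOutputStream;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.CompressionOutputStream;
    import org.apache.hadoop.io.compress.GzipCodec;
    import org.apache.hadoop.util.ReflectionUtils;

    public class ResetStateSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        CompressionOutputStream out = codec.createOutputStream(bytes);

        out.write("first payload".getBytes(StandardCharsets.UTF_8));
        out.finish();       // completes the first compressed stream
        out.resetState();   // re-arms the compressor without recreating the stream

        out.write("second payload".getBytes(StandardCharsets.UTF_8));
        out.finish();
        out.close();
        System.out.println("wrote " + bytes.size() + " compressed bytes");
      }
    }
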

+ 15 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java

@@ -67,7 +67,7 @@ public class RPCCallBenchmark implements Tool, Configurable {
    private int serverReaderThreads = 1;
    private int clientThreads = 0;
    private String host = "0.0.0.0";
-    private int port = 12345;
+    private int port = 0;
    public int secondsToRun = 15;
    private int msgSize = 1024;
    public Class<? extends RpcEngine> rpcEngine =
@@ -201,11 +201,21 @@ public class RPCCallBenchmark implements Tool, Configurable {
      }
    }
    
+    public int getPort() {
+      if (port == 0) {
+        port = NetUtils.getFreeSocketPort();
+        if (port == 0) {
+          throw new RuntimeException("Could not find a free port");
+        }
+      }
+      return port;
+    }
+
    @Override
    public String toString() {
      return "rpcEngine=" + rpcEngine + "\nserverThreads=" + serverThreads
          + "\nserverReaderThreads=" + serverReaderThreads + "\nclientThreads="
-          + clientThreads + "\nhost=" + host + "\nport=" + port
+          + clientThreads + "\nhost=" + host + "\nport=" + getPort()
          + "\nsecondsToRun=" + secondsToRun + "\nmsgSize=" + msgSize;
    }
  }
@@ -228,12 +238,12 @@ public class RPCCallBenchmark implements Tool, Configurable {
          .newReflectiveBlockingService(serverImpl);
 
      server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
-          .setInstance(service).setBindAddress(opts.host).setPort(opts.port)
+          .setInstance(service).setBindAddress(opts.host).setPort(opts.getPort())
          .setNumHandlers(opts.serverThreads).setVerbose(false).build();
    } else if (opts.rpcEngine == WritableRpcEngine.class) {
      server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
          .setInstance(new TestRPC.TestImpl()).setBindAddress(opts.host)
-          .setPort(opts.port).setNumHandlers(opts.serverThreads)
+          .setPort(opts.getPort()).setNumHandlers(opts.serverThreads)
          .setVerbose(false).build();
    } else {
      throw new RuntimeException("Bad engine: " + opts.rpcEngine);
@@ -378,7 +388,7 @@ public class RPCCallBenchmark implements Tool, Configurable {
   * Create a client proxy for the specified engine.
   */
  private RpcServiceWrapper createRpcClient(MyOptions opts) throws IOException {
-    InetSocketAddress addr = NetUtils.createSocketAddr(opts.host, opts.port);
+    InetSocketAddress addr = NetUtils.createSocketAddr(opts.host, opts.getPort());
    
    if (opts.rpcEngine == ProtobufRpcEngine.class) {
      final TestRpcService proxy = RPC.getProxy(TestRpcService.class, 0, addr, conf);

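The MyOptions change above replaces the hard-coded benchmark port with a lazily resolved free port so that concurrent runs do not collide. The same pattern, lifted out of the benchmark (class and field names here are illustrative), is simply:

    import org.apache.hadoop.net.NetUtils;

    public class FreePortHolder {
      private int port = 0;   // 0 means "not chosen yet"

      // Resolve the port on first use, mirroring MyOptions#getPort() above.
      public synchronized int getPort() {
        if (port == 0) {
          port = NetUtils.getFreeSocketPort();
          if (port == 0) {
            throw new RuntimeException("Could not find a free port");
          }
        }
        return port;
      }
    }
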
+ 20 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java

@@ -62,7 +62,6 @@ public class TestIPC {
  final private static Configuration conf = new Configuration();
  final static private int PING_INTERVAL = 1000;
  final static private int MIN_SLEEP_TIME = 1000;
-
  /**
   * Flag used to turn off the fault injection behavior
   * of the various writables.
@@ -499,6 +498,26 @@ public class TestIPC {
    client.call(new LongWritable(RANDOM.nextLong()),
        addr, null, null, 3*PING_INTERVAL+MIN_SLEEP_TIME, conf);
  }
+
+  @Test
+  public void testIpcConnectTimeout() throws Exception {
+    // start server
+    Server server = new TestServer(1, true);
+    InetSocketAddress addr = NetUtils.getConnectAddress(server);
+    //Intentionally do not start server to get a connection timeout
+
+    // start client
+    Client.setConnectTimeout(conf, 100);
+    Client client = new Client(LongWritable.class, conf);
+    // set the rpc timeout to twice the MIN_SLEEP_TIME
+    try {
+      client.call(new LongWritable(RANDOM.nextLong()),
+              addr, null, null, MIN_SLEEP_TIME*2, conf);
+      fail("Expected an exception to have been thrown");
+    } catch (SocketTimeoutException e) {
+      LOG.info("Got a SocketTimeoutException ", e);
+    }
+  }
   
   
  /**
   * Check that file descriptors aren't leaked by starting

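The new testIpcConnectTimeout case exercises Client.setConnectTimeout(conf, millis), which bounds how long the IPC client waits while establishing the connection, independently of the per-call RPC timeout. A hedged usage sketch outside the test harness (the address is a placeholder for an endpoint that accepts connections slowly or not at all):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.ipc.Client;

    public class ConnectTimeoutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Client.setConnectTimeout(conf, 100);   // connect phase capped at 100 ms
        Client client = new Client(LongWritable.class, conf);
        InetSocketAddress addr = new InetSocketAddress("10.255.255.1", 8020); // placeholder
        try {
          // 2000 ms is the RPC timeout for the call itself, as in the test above.
          client.call(new LongWritable(42L), addr, null, null, 2000, conf);
        } catch (IOException expected) {
          // With nothing reachable at addr, a connect-phase SocketTimeoutException
          // is the expected failure.
          System.out.println("call failed as expected: " + expected);
        } finally {
          client.stop();
        }
      }
    }
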
+ 13 - 0
hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier

@@ -1,2 +1,15 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
 org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier
 org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier

+ 17 - 1
hadoop-common-project/hadoop-common/src/test/resources/kdc/killKdc.sh

@@ -1,3 +1,19 @@
 #!/bin/sh
-ps -ef | grep apacheds | grep -v grep | cut -f4 -d ' ' |xargs kill -9
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ 
+ps -ef | grep apacheds | grep -v grep | awk '{printf $2"\n"}' | xargs -t --no-run-if-empty kill -9
 
 

+ 0 - 3
hadoop-common-project/pom.xml

@@ -49,9 +49,6 @@
        <groupId>org.apache.rat</groupId>
        <artifactId>apache-rat-plugin</artifactId>
        <configuration>
-          <includes>
-            <include>pom.xml</include>
-          </includes>
        </configuration>
      </plugin>
    </plugins>

+ 0 - 3
hadoop-dist/pom.xml

@@ -66,9 +66,6 @@
        <groupId>org.apache.rat</groupId>
        <artifactId>apache-rat-plugin</artifactId>
        <configuration>
-          <includes>
-            <include>pom.xml</include>
-          </includes>
        </configuration>
      </plugin>
    </plugins>

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -359,6 +359,8 @@
        <artifactId>apache-rat-plugin</artifactId>
        <configuration>
          <excludes>
+            <exclude>src/test/resources/classutils.txt</exclude>
+            <exclude>src/main/conf/httpfs-signature.secret</exclude>
          </excludes>
        </configuration>
      </plugin>

+ 17 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java

@@ -29,6 +29,9 @@ import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
 import java.io.IOException;
 import java.net.InetAddress;
+import java.net.UnknownHostException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Filter that resolves the requester hostname.
@@ -36,6 +39,7 @@ import java.net.InetAddress;
 @InterfaceAudience.Private
 public class HostnameFilter implements Filter {
   static final ThreadLocal<String> HOSTNAME_TL = new ThreadLocal<String>();
+  private static final Logger log = LoggerFactory.getLogger(HostnameFilter.class);
 
   /**
    * Initializes the filter.
@@ -66,7 +70,19 @@ public class HostnameFilter implements Filter {
   public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
     throws IOException, ServletException {
     try {
-      String hostname = InetAddress.getByName(request.getRemoteAddr()).getCanonicalHostName();
+      String hostname;
+      try {
+        String address = request.getRemoteAddr();
+        if (address != null) {
+          hostname = InetAddress.getByName(address).getCanonicalHostName();
+        } else {
+          log.warn("Request remote address is NULL");
+          hostname = "???";
+        }
+      } catch (UnknownHostException ex) {
+        log.warn("Request remote address could not be resolved: {}", ex.toString(), ex);
+        hostname = "???";
+      }
       HOSTNAME_TL.set(hostname);
       chain.doFilter(request, response);
     } finally {

+ 26 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java

@@ -64,4 +64,30 @@ public class TestHostnameFilter extends HTestCase {
     filter.destroy();
   }
 
+  @Test
+  public void testMissingHostname() throws Exception {
+    ServletRequest request = Mockito.mock(ServletRequest.class);
+    Mockito.when(request.getRemoteAddr()).thenReturn(null);
+
+    ServletResponse response = Mockito.mock(ServletResponse.class);
+
+    final AtomicBoolean invoked = new AtomicBoolean();
+
+    FilterChain chain = new FilterChain() {
+      @Override
+      public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
+        throws IOException, ServletException {
+        assertTrue(HostnameFilter.get().contains("???"));
+        invoked.set(true);
+      }
+    };
+
+    Filter filter = new HostnameFilter();
+    filter.init(null);
+    assertNull(HostnameFilter.get());
+    filter.doFilter(request, response, chain);
+    assertTrue(invoked.get());
+    assertNull(HostnameFilter.get());
+    filter.destroy();
+  }
 }

+ 105 - 4
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -285,15 +285,17 @@ Trunk (Unreleased)
     HDFS-4310. fix test org.apache.hadoop.hdfs.server.datanode.
     TestStartSecureDataNode (Ivan A. Veselovsky via atm)
 
-    HDFS-4274. BlockPoolSliceScanner does not close verification log during
-    shutdown. (Chris Nauroth via suresh)
-
     HDFS-4275. MiniDFSCluster-based tests fail on Windows due to failure
     to delete test namenode directory. (Chris Nauroth via suresh)
 
     HDFS-4338. TestNameNodeMetrics#testCorruptBlock is flaky. (Andrew Wang via
     atm)
 
+    HDFS-4261. Fix bugs in Balancer causing infinite loop and
+    TestBalancerWithNodeGroup timing out.  (Junping Du via szetszwo)
+
+    HDFS-4382. Fix typo MAX_NOT_CHANGED_INTERATIONS. (Ted Yu via suresh)
+
 Release 2.0.3-alpha - Unreleased 
 
   INCOMPATIBLE CHANGES
@@ -301,6 +303,17 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4122. Cleanup HDFS logs and reduce the size of logged messages.
     (suresh)
 
+    HDFS-4362. GetDelegationTokenResponseProto does not handle null token.
+    (suresh)
+
+    HDFS-4367. GetDataEncryptionKeyResponseProto does not handle null
+    response. (suresh)
+
+    HDFS-4364. GetLinkTargetResponseProto does not handle null path. (suresh)
+
+    HDFS-4369. GetBlockKeysResponseProto does not handle null response.
+    (suresh)
+
   NEW FEATURES
 
     HDFS-2656. Add libwebhdfs, a pure C client based on WebHDFS.
@@ -440,8 +453,54 @@ Release 2.0.3-alpha - Unreleased
 
 
     HDFS-4326. bump up Tomcat version for HttpFS to 6.0.36. (tucu via acmurthy)
 
+    HDFS-4270. Introduce soft and hard limits for max replication so that
+    replications of the highest priority are allowed to choose a source datanode
+    that has reached its soft limit but not the hard limit.  (Derek Dagit via
+    szetszwo)
+
+    HADOOP-9173. Add security token protobuf definition to common and
+    use it in hdfs. (suresh)
+
+    HDFS-4030. BlockManager excessBlocksCount and
+    postponedMisreplicatedBlocksCount should be AtomicLongs. (eli)
+
+    HDFS-4031. Update findbugsExcludeFile.xml to include findbugs 2
+    exclusions. (eli)
+    
+    HDFS-4033. Miscellaneous findbugs 2 fixes. (eli)
+
+    HDFS-4034. Remove redundant null checks. (eli)
+
+    HDFS-4035. LightWeightGSet and LightWeightHashSet increment a
+    volatile without synchronization. (eli)
+    
+    HDFS-4032. Specify the charset explicitly rather than rely on the
+    default. (eli)
+
+    HDFS-4363. Combine PBHelper and HdfsProtoUtil and remove redundant
+    methods. (suresh)
+
+    HDFS-4377. Some trivial DN comment cleanup. (eli)
+
+    HDFS-4381. Document fsimage format details in FSImageFormat class javadoc.
+    (Jing Zhao via suresh)
+
+    HDFS-4375. Use token request messages defined in hadoop common.
+    (suresh)
+
+    HDFS-4392. Use NetUtils#getFreeSocketPort in MiniDFSCluster.
+    (Andrew Purtell via suresh)
+
+    HDFS-4393. Make empty request and responses in protocol translators
+    static final members. (Brandon Li via suresh)
+
+    HDFS-4403. DFSClient can infer checksum type when not provided by reading
+    first byte (todd)
+
   OPTIMIZATIONS
 
+    HDFS-3429. DataNode reads checksums even if client does not need them (todd)
+
   BUG FIXES
 
     HDFS-3919. MiniDFSCluster:waitClusterUp can hang forever.
@@ -646,6 +705,29 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4302. Fix fatal exception when starting NameNode with DEBUG logs
     (Eugene Koontz via todd)
 
+    HDFS-3970. Fix bug causing rollback of HDFS upgrade to result in bad
+    VERSION file. (Vinay and Andrew Wang via atm)
+
+    HDFS-4306. PBHelper.convertLocatedBlock miss convert BlockToken. (Binglin
+    Chang via atm)
+
+    HDFS-4384. test_libhdfs_threaded gets SEGV if JNIEnv cannot be
+    initialized. (Colin Patrick McCabe via eli)
+
+    HDFS-4328. TestLargeBlock#testLargeBlockSize is timing out. (Chris Nauroth
+    via atm)
+
+    HDFS-4274. BlockPoolSliceScanner does not close verification log during
+    shutdown. (Chris Nauroth via suresh)
+
+    HDFS-1245. Pluggable block id generation. (shv)
+
+    HDFS-4415. HostnameFilter should handle hostname resolution failures and
+    continue processing. (Robert Kanter via atm)
+
+    HDFS-4359. Slow RPC responses from NN can prevent metrics collection on
+    DNs. (liang xie via atm)
+
   BREAKDOWN OF HDFS-3077 SUBTASKS
 
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.
@@ -748,6 +830,11 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4017. Unclosed FileInputStream in GetJournalEditServlet
     (Chao Shi via todd)
 
+    HDFS-4351. In BlockPlacementPolicyDefault.chooseTarget(..), numOfReplicas
+    needs to be updated when avoiding stale nodes.  (Andrew Wang via szetszwo)
+
+    HDFS-4399. Fix RAT warnings by excluding images sub-dir in docs. (Thomas
+    Graves via acmurthy) 
 
 
 Release 2.0.2-alpha - 2012-09-07 
 Release 2.0.2-alpha - 2012-09-07 
 
 
@@ -2126,6 +2213,18 @@ Release 2.0.0-alpha - 05-23-2012
     
     
     HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm)
 
+Release 0.23.7 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 0.23.6 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -2143,7 +2242,9 @@ Release 0.23.6 - UNRELEASED
     HDFS-4248. Renaming directories may incorrectly remove the paths in leases
     under the tree.  (daryn via szetszwo)
 
-Release 0.23.5 - UNRELEASED
+    HDFS-4385. Maven RAT plugin is not checking all source files (tgraves)
+
+Release 0.23.5 - 2012-11-28
 
   INCOMPATIBLE CHANGES
 

+ 27 - 0
hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt

@@ -242,3 +242,30 @@ For the org.apache.hadoop.util.bloom.* classes:
  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
  * POSSIBILITY OF SUCH DAMAGE.
  */
+
+For src/main/native/util/tree.h:
+
+/*-
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */

+ 34 - 0
hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml

@@ -290,4 +290,38 @@
       <Method name="persistPaxosData" />
       <Bug pattern="OS_OPEN_STREAM" />
     </Match>
+     <!-- Don't complain about LocalDatanodeInfo's anonymous class -->
+     <Match>
+       <Class name="org.apache.hadoop.hdfs.BlockReaderLocal$LocalDatanodeInfo$1" />
+       <Bug pattern="SE_BAD_FIELD_INNER_CLASS" />
+     </Match>
+     <!-- Only one method increments numFailedVolumes and it is synchronized -->
+     <Match>
+       <Class name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeList" />
+       <Field name="numFailedVolumes" />
+       <Bug pattern="VO_VOLATILE_INCREMENT" />
+     </Match>
+     <!-- Access to pendingReceivedRequests is synchronized -->
+     <Match>
+       <Class name="org.apache.hadoop.hdfs.server.datanode.BPServiceActor" />
+       <Method name="notifyNamenodeBlockImmediately" />
+       <Field name="pendingReceivedRequests" />
+       <Bug pattern="VO_VOLATILE_INCREMENT" />
+     </Match>
+     <!-- The "LightWeight" classes are explicitly not thread safe -->
+     <Match>
+       <Class name="org.apache.hadoop.hdfs.util.LightWeightGSet" />
+       <Field name="modification" />
+       <Bug pattern="VO_VOLATILE_INCREMENT" />
+     </Match>
+     <Match>
+       <Class name="org.apache.hadoop.hdfs.util.LightWeightHashSet" />
+       <Field name="modification" />
+       <Bug pattern="VO_VOLATILE_INCREMENT" />
+     </Match>
+     <Match>
+       <Class name="org.apache.hadoop.hdfs.util.LightWeightLinkedSet" />
+       <Field name="modification" />
+       <Bug pattern="VO_VOLATILE_INCREMENT" />
+     </Match>
 </FindBugsFilter>

+ 11 - 1
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -420,8 +420,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            <configuration>
              <executable>protoc</executable>
              <arguments>
+                <argument>-I../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
                <argument>-Isrc/main/proto/</argument>
                <argument>--java_out=target/generated-sources/java</argument>
+                <argument>src/main/proto/hdfs.proto</argument>
                <argument>src/main/proto/GetUserMappingsProtocol.proto</argument>
                <argument>src/main/proto/HAZKInfo.proto</argument>
                <argument>src/main/proto/InterDatanodeProtocol.proto</argument>
@@ -429,7 +431,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                <argument>src/main/proto/RefreshAuthorizationPolicyProtocol.proto</argument>
                <argument>src/main/proto/RefreshUserMappingsProtocol.proto</argument>
                <argument>src/main/proto/datatransfer.proto</argument>
-                <argument>src/main/proto/hdfs.proto</argument>
              </arguments>
            </configuration>
          </execution>
@@ -442,6 +443,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            <configuration>
              <executable>protoc</executable>
              <arguments>
+                <argument>-I../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
                <argument>-Isrc/main/proto/</argument>
                <argument>--java_out=target/generated-sources/java</argument>
                <argument>src/main/proto/ClientDatanodeProtocol.proto</argument>
@@ -458,6 +460,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            <configuration>
              <executable>protoc</executable>
              <arguments>
+                <argument>-I../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
                <argument>-Isrc/main/proto/</argument>
                <argument>--java_out=target/generated-sources/java</argument>
                <argument>src/main/proto/ClientNamenodeProtocol.proto</argument>
@@ -474,6 +477,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            <configuration>
              <executable>protoc</executable>
              <arguments>
+                <argument>-I../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
                <argument>-Isrc/main/proto/</argument>
                <argument>--java_out=target/generated-sources/java</argument>
                <argument>src/main/proto/QJournalProtocol.proto</argument>
@@ -512,9 +516,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            <exclude>src/test/resources/data*</exclude>
            <exclude>src/test/resources/editsStored*</exclude>
            <exclude>src/test/resources/empty-file</exclude>
+            <exclude>src/main/native/util/tree.h</exclude>
+            <exclude>src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj</exclude>
            <exclude>src/main/webapps/datanode/robots.txt</exclude>
            <exclude>src/main/docs/releasenotes.html</exclude>
            <exclude>src/contrib/**</exclude>
+            <exclude>src/site/resources/images/*</exclude>
          </excludes>
        </configuration>
      </plugin>
@@ -559,6 +566,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
                      <arg line="VERBOSE=1"/>
                    </exec>
+                    <!-- The second make is a workaround for HADOOP-9215.  It can
+                         be removed when version 2.6 of cmake is no longer supported. -->
+                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true"></exec>
                  </target>
                </configuration>
              </execution>

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake

@@ -1,3 +1,20 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 #ifndef CONFIG_H
 #define CONFIG_H
 

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml

@@ -143,6 +143,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            <configuration>
              <executable>protoc</executable>
              <arguments>
+                <argument>-I../../../../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
                <argument>-Isrc/main/proto/</argument>
                <argument>-I../../main/proto</argument>
                <argument>--java_out=target/generated-sources/java</argument>

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml

@@ -92,10 +92,11 @@ There is no provision within HDFS for creating user identities, establishing gro
 
 
 <section><title>Group Mapping</title>
 <p>
-Once a username has been determined as described above, the list of groups is determined by a <em>group mapping
-service</em>, configured by the <code>hadoop.security.group.mapping</code> property.
-The default implementation, <code>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</code>, will shell out
-to the Unix <code>bash -c groups</code> command to resolve a list of groups for a user.
+Once a username has been determined as described above, the list of groups is 
+determined by a <em>group mapping service</em>, configured by the 
+<code>hadoop.security.group.mapping</code> property. Refer to the 
+core-default.xml for details of the <code>hadoop.security.group.mapping</code>
+implementation.
 </p>
 <p>
 An alternate implementation, which connects directly to an LDAP server to resolve the list of groups, is available

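The rewritten paragraph above now points readers at core-default.xml for the hadoop.security.group.mapping implementation. For completeness, a small, hedged probe of whichever mapping service ends up configured (the user name is an arbitrary example) can be written against the public Groups API:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.Groups;

    public class GroupMappingProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The class name of the configured group mapping provider.
        System.out.println(conf.get("hadoop.security.group.mapping"));
        // Groups for a user, resolved through that provider (may throw if the
        // user is unknown to the underlying service).
        List<String> groups = Groups.getUserToGroupsMappingService(conf).getGroups("hdfs");
        System.out.println(groups);
      }
    }
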
+ 139 - 53
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -79,7 +79,6 @@ import javax.net.SocketFactory;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStorageLocation;
@@ -115,7 +114,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
@@ -128,6 +126,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
@@ -151,6 +150,7 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenRenewer;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
 
@@ -363,7 +363,7 @@ public class DFSClient implements java.io.Closeable {
 
 
   /**
    * Same as this(nameNodeUri, conf, null);
-   * @see #DFSClient(InetSocketAddress, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)
+   * @see #DFSClient(URI, Configuration, FileSystem.Statistics)
    */
   public DFSClient(URI nameNodeUri, Configuration conf
       ) throws IOException {
@@ -372,7 +372,7 @@ public class DFSClient implements java.io.Closeable {
 
 
   /**
    * Same as this(nameNodeUri, null, conf, stats);
-   * @see #DFSClient(InetSocketAddress, ClientProtocol, Configuration, org.apache.hadoop.fs.FileSystem.Statistics) 
+   * @see #DFSClient(URI, ClientProtocol, Configuration, FileSystem.Statistics) 
    */
   public DFSClient(URI nameNodeUri, Configuration conf,
                    FileSystem.Statistics stats)
@@ -1157,8 +1157,8 @@ public class DFSClient implements java.io.Closeable {
 
 
   /**
    * Call {@link #create(String, FsPermission, EnumSet, short, long, 
-   * Progressable, int)} with default <code>permission</code>
-   * {@link FsPermission#getDefault()}.
+   * Progressable, int, ChecksumOpt)} with default <code>permission</code>
+   * {@link FsPermission#getFileDefault()}.
    * 
    * @param src File name
    * @param overwrite overwrite an existing file if true
@@ -1176,7 +1176,7 @@ public class DFSClient implements java.io.Closeable {
                              Progressable progress,
                              int buffersize)
       throws IOException {
-    return create(src, FsPermission.getDefault(),
+    return create(src, FsPermission.getFileDefault(),
         overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
             : EnumSet.of(CreateFlag.CREATE), replication, blockSize, progress,
         buffersize, null);
@@ -1207,7 +1207,7 @@ public class DFSClient implements java.io.Closeable {
    * 
    * @param src File name
    * @param permission The permission of the directory being created.
-   *          If null, use default permission {@link FsPermission#getDefault()}
+   *          If null, use default permission {@link FsPermission#getFileDefault()}
    * @param flag indicates create a new file or create/overwrite an
    *          existing file or append to an existing file
    * @param createParent create missing parent directory if true
@@ -1233,7 +1233,7 @@ public class DFSClient implements java.io.Closeable {
                              ChecksumOpt checksumOpt) throws IOException {
     checkOpen();
     if (permission == null) {
-      permission = FsPermission.getDefault();
+      permission = FsPermission.getFileDefault();
     }
     FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
     if(LOG.isDebugEnabled()) {
@@ -1268,7 +1268,7 @@ public class DFSClient implements java.io.Closeable {
   
   
   /**
    * Same as {{@link #create(String, FsPermission, EnumSet, short, long,
-   *  Progressable, int)} except that the permission
+   *  Progressable, int, ChecksumOpt)} except that the permission
    *  is absolute (ie has already been masked with umask.
    */
   public DFSOutputStream primitiveCreate(String src, 
@@ -1453,7 +1453,7 @@ public class DFSClient implements java.io.Closeable {
   }
   /**
    * Delete file or directory.
-   * See {@link ClientProtocol#delete(String)}. 
+   * See {@link ClientProtocol#delete(String, boolean)}. 
    */
   @Deprecated
   public boolean delete(String src) throws IOException {
@@ -1563,7 +1563,7 @@ public class DFSClient implements java.io.Closeable {
    */
   public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
     checkOpen();
-    return getFileChecksum(src, namenode, socketFactory,
+    return getFileChecksum(src, clientName, namenode, socketFactory,
         dfsClientConf.socketTimeout, getDataEncryptionKey(),
         dfsClientConf.connectToDnViaHostname);
   }
@@ -1592,8 +1592,7 @@ public class DFSClient implements java.io.Closeable {
     if (shouldEncryptData()) {
       synchronized (this) {
         if (encryptionKey == null ||
-            (encryptionKey != null &&
-             encryptionKey.expiryDate < Time.now())) {
+            encryptionKey.expiryDate < Time.now()) {
           LOG.debug("Getting new encryption token from NN");
           encryptionKey = namenode.getDataEncryptionKey();
         }
@@ -1607,9 +1606,16 @@ public class DFSClient implements java.io.Closeable {
   /**
    * Get the checksum of a file.
    * @param src The file path
+   * @param clientName the name of the client requesting the checksum.
+   * @param namenode the RPC proxy for the namenode
+   * @param socketFactory to create sockets to connect to DNs
+   * @param socketTimeout timeout to use when connecting and waiting for a response
+   * @param encryptionKey the key needed to communicate with DNs in this cluster
+   * @param connectToDnViaHostname whether to connect to the datanode by its hostname
    * @return The checksum 
    */
-  public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
+  static MD5MD5CRC32FileChecksum getFileChecksum(String src,
+      String clientName,
       ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout,
       DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
       throws IOException {
@@ -1644,32 +1650,16 @@ public class DFSClient implements java.io.Closeable {
       final int timeout = 3000 * datanodes.length + socketTimeout;
       boolean done = false;
       for(int j = 0; !done && j < datanodes.length; j++) {
-        Socket sock = null;
         DataOutputStream out = null;
         DataInputStream in = null;
         
         try {
           //connect to a datanode
-          sock = socketFactory.createSocket();
-          String dnAddr = datanodes[j].getXferAddr(connectToDnViaHostname);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Connecting to datanode " + dnAddr);
-          }
-          NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
-          sock.setSoTimeout(timeout);
-
-          OutputStream unbufOut = NetUtils.getOutputStream(sock);
-          InputStream unbufIn = NetUtils.getInputStream(sock);
-          if (encryptionKey != null) {
-            IOStreamPair encryptedStreams =
-                DataTransferEncryptor.getEncryptedStreams(
-                    unbufOut, unbufIn, encryptionKey);
-            unbufOut = encryptedStreams.out;
-            unbufIn = encryptedStreams.in;
-          }
-          out = new DataOutputStream(new BufferedOutputStream(unbufOut,
+          IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
+              encryptionKey, datanodes[j], timeout);
+          out = new DataOutputStream(new BufferedOutputStream(pair.out,
               HdfsConstants.SMALL_BUFFER_SIZE));
-          in = new DataInputStream(unbufIn);
+          in = new DataInputStream(pair.in);
 
           if (LOG.isDebugEnabled()) {
             LOG.debug("write to " + datanodes[j] + ": "
@@ -1679,22 +1669,11 @@ public class DFSClient implements java.io.Closeable {
           new Sender(out).blockChecksum(block, lb.getBlockToken());
 
           final BlockOpResponseProto reply =
-            BlockOpResponseProto.parseFrom(HdfsProtoUtil.vintPrefixed(in));
+            BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
 
           if (reply.getStatus() != Status.SUCCESS) {
-            if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN
-                && i > lastRetriedIndex) {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
-                    + "for file " + src + " for block " + block
-                    + " from datanode " + datanodes[j]
-                    + ". Will retry the block once.");
-              }
-              lastRetriedIndex = i;
-              done = true; // actually it's not done; but we'll retry
-              i--; // repeat at i-th block
-              refetchBlocks = true;
-              break;
+            if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
+              throw new InvalidBlockTokenException();
             } else {
               throw new IOException("Bad response " + reply + " for block "
                   + block + " from datanode " + datanodes[j]);
@@ -1726,8 +1705,18 @@ public class DFSClient implements java.io.Closeable {
           md5.write(md5out);
           
           // read crc-type
-          final DataChecksum.Type ct = HdfsProtoUtil.
-              fromProto(checksumData.getCrcType());
+          final DataChecksum.Type ct;
+          if (checksumData.hasCrcType()) {
+            ct = PBHelper.convert(checksumData
+                .getCrcType());
+          } else {
+            LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
+                      "inferring checksum by reading first byte");
+            ct = inferChecksumTypeByReading(
+                clientName, socketFactory, socketTimeout, lb, datanodes[j],
+                encryptionKey, connectToDnViaHostname);
+          }
+
           if (i == 0) { // first block
             crcType = ct;
           } else if (crcType != DataChecksum.Type.MIXED
@@ -1745,12 +1734,25 @@ public class DFSClient implements java.io.Closeable {
             }
             LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
           }
+        } catch (InvalidBlockTokenException ibte) {
+          if (i > lastRetriedIndex) {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
+                  + "for file " + src + " for block " + block
+                  + " from datanode " + datanodes[j]
+                  + ". Will retry the block once.");
+            }
+            lastRetriedIndex = i;
+            done = true; // actually it's not done; but we'll retry
+            i--; // repeat at i-th block
+            refetchBlocks = true;
+            break;
+          }
         } catch (IOException ie) {
           LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
         } finally {
           IOUtils.closeStream(in);
           IOUtils.closeStream(out);
-          IOUtils.closeSocket(sock);        
         }
       }
 
@@ -1782,6 +1784,90 @@ public class DFSClient implements java.io.Closeable {
     }
   }
 
+  /**
+   * Connect to the given datanode's data transfer port, and return
+   * the resulting IOStreamPair. This includes encryption wrapping, etc.
+   */
+  private static IOStreamPair connectToDN(
+      SocketFactory socketFactory, boolean connectToDnViaHostname,
+      DataEncryptionKey encryptionKey, DatanodeInfo dn, int timeout)
+      throws IOException
+  {
+    boolean success = false;
+    Socket sock = null;
+    try {
+      sock = socketFactory.createSocket();
+      String dnAddr = dn.getXferAddr(connectToDnViaHostname);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Connecting to datanode " + dnAddr);
+      }
+      NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
+      sock.setSoTimeout(timeout);
+  
+      OutputStream unbufOut = NetUtils.getOutputStream(sock);
+      InputStream unbufIn = NetUtils.getInputStream(sock);
+      IOStreamPair ret;
+      if (encryptionKey != null) {
+        ret = DataTransferEncryptor.getEncryptedStreams(
+                unbufOut, unbufIn, encryptionKey);
+      } else {
+        ret = new IOStreamPair(unbufIn, unbufOut);        
+      }
+      success = true;
+      return ret;
+    } finally {
+      if (!success) {
+        IOUtils.closeSocket(sock);
+      }
+    }
+  }
+  
+  /**
+   * Infer the checksum type for a replica by sending an OP_READ_BLOCK
+   * for the first byte of that replica. This is used for compatibility
+   * with older HDFS versions which did not include the checksum type in
+   * OpBlockChecksumResponseProto.
+   *
+   * @param socketFactory to create sockets to connect to DNs
+   * @param socketTimeout timeout to use when connecting and waiting for a response
+   * @param encryptionKey the key needed to communicate with DNs in this cluster
+   * @param connectToDnViaHostname whether to connect to the datanode by its hostname
+   * @param lb the located block
+   * @param clientName the name of the DFSClient requesting the checksum
+   * @param dn the connected datanode
+   * @return the inferred checksum type
+   * @throws IOException if an error occurs
+   */
+  private static Type inferChecksumTypeByReading(
+      String clientName, SocketFactory socketFactory, int socketTimeout,
+      LocatedBlock lb, DatanodeInfo dn,
+      DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
+      throws IOException {
+    IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
+        encryptionKey, dn, socketTimeout);
+
+    try {
+      DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
+          HdfsConstants.SMALL_BUFFER_SIZE));
+      DataInputStream in = new DataInputStream(pair.in);
+  
+      new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true);
+      final BlockOpResponseProto reply =
+          BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
+      
+      if (reply.getStatus() != Status.SUCCESS) {
+        if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
+          throw new InvalidBlockTokenException();
+        } else {
+          throw new IOException("Bad response " + reply + " trying to read "
+              + lb.getBlock() + " from datanode " + dn);
+        }
+      }
+      
+      return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
+    } finally {
+      IOUtils.cleanup(null, pair.in, pair.out);
+    }
+  }
+
   /**
    * Set permissions to a file or directory.
    * @param src path name.
@@ -1889,7 +1975,7 @@ public class DFSClient implements java.io.Closeable {
    * @param isChecked
    *          If true, then check only active namenode's safemode status, else
    *          check first namenode's status.
-   * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeActio,boolean)
+   * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction, boolean)
    */
   public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException{
     return namenode.setSafeMode(action, isChecked);    

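One detail of the refactoring above worth calling out: connectToDN() closes the raw socket only when stream setup fails, because on success the returned IOStreamPair takes ownership of it. A generic, hedged sketch of that success-flag handoff idiom, using plain JDK types rather than the HDFS classes, is:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    public class SocketHandoff {
      /** Connect and hand the socket's stream to the caller; close only on failure. */
      static OutputStream connect(InetSocketAddress addr, int timeoutMs) throws IOException {
        boolean success = false;
        Socket sock = new Socket();
        try {
          sock.connect(addr, timeoutMs);
          OutputStream out = sock.getOutputStream();   // wrapping/encryption would happen here
          success = true;                              // ownership transfers to the caller
          return out;
        } finally {
          if (!success) {
            sock.close();                              // clean up only on the error path
          }
        }
      }
    }
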
+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -143,6 +143,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY = "dfs.namenode.replication.max-streams";
   public static final int     DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2;
+  public static final String  DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY = "dfs.namenode.replication.max-streams-hard-limit";
+  public static final int     DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4;
   public static final String  DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
   public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = false;
   public static final String  DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";

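The two new keys pair a soft and a hard cap on per-datanode replication streams; per the HDFS-4270 entry earlier in this commit, only the highest-priority replications may go past the soft limit. A hedged sketch of overriding them programmatically, using the literal key strings from the hunk above:

    import org.apache.hadoop.conf.Configuration;

    public class ReplicationLimitsExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Soft limit: normal cap on concurrent replication streams per datanode.
        conf.setInt("dfs.namenode.replication.max-streams", 2);
        // Hard limit: highest-priority replications may exceed the soft limit up to this value.
        conf.setInt("dfs.namenode.replication.max-streams-hard-limit", 4);
        System.out.println(conf.getInt("dfs.namenode.replication.max-streams-hard-limit", 4));
      }
    }
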
+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
@@ -66,6 +65,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
@@ -883,7 +883,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
 
 
         //ack
         //ack
         BlockOpResponseProto response =
         BlockOpResponseProto response =
-          BlockOpResponseProto.parseFrom(HdfsProtoUtil.vintPrefixed(in));
+          BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
         if (SUCCESS != response.getStatus()) {
         if (SUCCESS != response.getStatus()) {
           throw new IOException("Failed to add a datanode");
           throw new IOException("Failed to add a datanode");
         }
         }
@@ -1073,7 +1073,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
   
   
           // receive ack for connect
           // receive ack for connect
           BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
           BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
-              HdfsProtoUtil.vintPrefixed(blockReplyStream));
+              PBHelper.vintPrefixed(blockReplyStream));
           pipelineStatus = resp.getStatus();
           pipelineStatus = resp.getStatus();
           firstBadLink = resp.getFirstBadLink();
           firstBadLink = resp.getFirstBadLink();
           
           

+ 8 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -80,6 +80,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -222,12 +223,7 @@ public class DFSUtil {
    * Converts a string to a byte array using UTF8 encoding.
    */
   public static byte[] string2Bytes(String str) {
-    try {
-      return str.getBytes("UTF8");
-    } catch(UnsupportedEncodingException e) {
-      assert false : "UTF8 encoding is not supported ";
-    }
-    return null;
+    return str.getBytes(Charsets.UTF_8);
   }
 
   /**
@@ -239,19 +235,14 @@
     if (pathComponents.length == 1 && pathComponents[0].length == 0) {
       return Path.SEPARATOR;
     }
-    try {
-      StringBuilder result = new StringBuilder();
-      for (int i = 0; i < pathComponents.length; i++) {
-        result.append(new String(pathComponents[i], "UTF-8"));
-        if (i < pathComponents.length - 1) {
-          result.append(Path.SEPARATOR_CHAR);
-        }
+    StringBuilder result = new StringBuilder();
+    for (int i = 0; i < pathComponents.length; i++) {
+      result.append(new String(pathComponents[i], Charsets.UTF_8));
+      if (i < pathComponents.length - 1) {
+        result.append(Path.SEPARATOR_CHAR);
       }
-      return result.toString();
-    } catch (UnsupportedEncodingException ex) {
-      assert false : "UTF8 encoding is not supported ";
     }
-    return null;
+    return result.toString();
   }
 
   /** Convert an object representing a path to a string. */

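Note (reviewer aside, not part of the patch): the String overloads that take a java.nio.charset.Charset are what let the UnsupportedEncodingException handling above disappear. A minimal round-trip sketch using Guava's Charsets as the patch does; the sample string is arbitrary:

    import com.google.common.base.Charsets;

    public class Utf8RoundTrip {
      public static void main(String[] args) throws Exception {
        String path = "/user/tr\u00e4ger/data";
        byte[] bytes = path.getBytes(Charsets.UTF_8);       // no checked exception
        String back = new String(bytes, Charsets.UTF_8);    // likewise
        byte[] legacy = path.getBytes("UTF8");              // declares UnsupportedEncodingException
        System.out.println(back.equals(path) && bytes.length == legacy.length);
      }
    }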
+ 12 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java

@@ -40,14 +40,18 @@ import org.apache.hadoop.tools.GetUserMappingsProtocol;
 public class HDFSPolicyProvider extends PolicyProvider {
   private static final Service[] hdfsServices =
     new Service[] {
-    new Service("security.client.protocol.acl", ClientProtocol.class),
-    new Service("security.client.datanode.protocol.acl", 
-                ClientDatanodeProtocol.class),
-    new Service("security.datanode.protocol.acl", DatanodeProtocol.class),
-    new Service("security.inter.datanode.protocol.acl", 
-                InterDatanodeProtocol.class),
-    new Service("security.namenode.protocol.acl", NamenodeProtocol.class),
-    new Service("security.qjournal.service.protocol.acl", QJournalProtocol.class),
+    new Service(CommonConfigurationKeys.SECURITY_CLIENT_PROTOCOL_ACL,
+        ClientProtocol.class),
+    new Service(CommonConfigurationKeys.SECURITY_CLIENT_DATANODE_PROTOCOL_ACL,
+        ClientDatanodeProtocol.class),
+    new Service(CommonConfigurationKeys.SECURITY_DATANODE_PROTOCOL_ACL,
+        DatanodeProtocol.class),
+    new Service(CommonConfigurationKeys.SECURITY_INTER_DATANODE_PROTOCOL_ACL, 
+        InterDatanodeProtocol.class),
+    new Service(CommonConfigurationKeys.SECURITY_NAMENODE_PROTOCOL_ACL,
+        NamenodeProtocol.class),
+    new Service(CommonConfigurationKeys.SECURITY_QJOURNAL_SERVICE_PROTOCOL_ACL,
+        QJournalProtocol.class),
     new Service(CommonConfigurationKeys.SECURITY_HA_SERVICE_PROTOCOL_ACL,
         HAServiceProtocol.class),
     new Service(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL,

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
-
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
@@ -39,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.io.IOUtils;
@@ -381,7 +380,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
     // in and out will be closed when sock is closed (by the caller)
     final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
           NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT)));
-    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len);
+    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
+        verifyChecksum);
     
     //
     // Get bytes in block, set streams
@@ -392,7 +392,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
                                 bufferSize));
     
     BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
-        vintPrefixed(in));
+        PBHelper.vintPrefixed(in));
     RemoteBlockReader2.checkSuccess(status, sock, block, file);
     ReadOpChecksumInfoProto checksumInfo =
       status.getReadOpChecksumInfo();

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
-
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
@@ -43,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatus
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.net.SocketInputWrapper;
@@ -393,7 +392,8 @@ public class RemoteBlockReader2  implements BlockReader {
     // in and out will be closed when sock is closed (by the caller)
     final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
           ioStreams.out));
-    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len);
+    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
+        verifyChecksum);
 
     //
     // Get bytes in block
@@ -401,7 +401,7 @@ public class RemoteBlockReader2  implements BlockReader {
     DataInputStream in = new DataInputStream(ioStreams.in);
 
     BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
-        vintPrefixed(in));
+        PBHelper.vintPrefixed(in));
     checkSuccess(status, sock, block, file);
     ReadOpChecksumInfoProto checksumInfo =
       status.getReadOpChecksumInfo();

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java

@@ -67,7 +67,10 @@ public class HdfsFileStatus {
     this.modification_time = modification_time;
     this.access_time = access_time;
     this.permission = (permission == null) ? 
-                      FsPermission.getDefault() : permission;
+        ((isdir || symlink!=null) ? 
+            FsPermission.getDefault() : 
+            FsPermission.getFileDefault()) :
+        permission;
     this.owner = (owner == null) ? "" : owner;
     this.group = (group == null) ? "" : group;
     this.symlink = symlink;

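Note (reviewer aside, not part of the patch): the constructor change above only affects the fallback used when no permission is supplied. A minimal sketch of the same selection logic as a standalone helper; the class and method names are hypothetical:

    import org.apache.hadoop.fs.permission.FsPermission;

    final class DefaultPermissions {
      // Directories and symlinks keep the old directory default; plain files
      // now fall back to FsPermission.getFileDefault(), as in the diff above.
      static FsPermission fallback(boolean isDir, byte[] symlink) {
        return (isDir || symlink != null)
            ? FsPermission.getDefault()
            : FsPermission.getFileDefault();
      }
    }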
+ 0 - 179
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java

@@ -1,179 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.util.ExactSizeInputStream;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.security.token.Token;
-
-import com.google.common.collect.Lists;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.CodedInputStream;
-
-/**
- * Utilities for converting to and from protocol buffers used in the
- * HDFS wire protocol, as well as some generic utilities useful
- * for dealing with protocol buffers.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public abstract class HdfsProtoUtil {
-  
-  //// Block Token ////
-  
-  public static HdfsProtos.BlockTokenIdentifierProto toProto(Token<?> blockToken) {
-    return HdfsProtos.BlockTokenIdentifierProto.newBuilder()
-      .setIdentifier(ByteString.copyFrom(blockToken.getIdentifier()))
-      .setPassword(ByteString.copyFrom(blockToken.getPassword()))
-      .setKind(blockToken.getKind().toString())
-      .setService(blockToken.getService().toString())
-      .build();
-  }
-
-  public static Token<BlockTokenIdentifier> fromProto(HdfsProtos.BlockTokenIdentifierProto proto) {
-    return new Token<BlockTokenIdentifier>(proto.getIdentifier().toByteArray(),
-        proto.getPassword().toByteArray(),
-        new Text(proto.getKind()),
-        new Text(proto.getService()));
-  }
-
-  //// Extended Block ////
-  
-  public static HdfsProtos.ExtendedBlockProto toProto(ExtendedBlock block) {
-    return HdfsProtos.ExtendedBlockProto.newBuilder()
-      .setBlockId(block.getBlockId())
-      .setPoolId(block.getBlockPoolId())
-      .setNumBytes(block.getNumBytes())
-      .setGenerationStamp(block.getGenerationStamp())
-      .build();
-  }
-    
-  public static ExtendedBlock fromProto(HdfsProtos.ExtendedBlockProto proto) {
-    return new ExtendedBlock(
-        proto.getPoolId(), proto.getBlockId(),
-        proto.getNumBytes(), proto.getGenerationStamp());
-  }
-
-  //// DatanodeID ////
-  
-  private static HdfsProtos.DatanodeIDProto toProto(
-      DatanodeID dni) {
-    return HdfsProtos.DatanodeIDProto.newBuilder()
-      .setIpAddr(dni.getIpAddr())
-      .setHostName(dni.getHostName())
-      .setStorageID(dni.getStorageID())
-      .setXferPort(dni.getXferPort())
-      .setInfoPort(dni.getInfoPort())
-      .setIpcPort(dni.getIpcPort())
-      .build();
-  }
-  
-  private static DatanodeID fromProto(HdfsProtos.DatanodeIDProto idProto) {
-    return new DatanodeID(
-        idProto.getIpAddr(),
-        idProto.getHostName(),
-        idProto.getStorageID(),
-        idProto.getXferPort(),
-        idProto.getInfoPort(),
-        idProto.getIpcPort());
-  }
-  
-  //// DatanodeInfo ////
-  
-  public static HdfsProtos.DatanodeInfoProto toProto(DatanodeInfo dni) {
-    return HdfsProtos.DatanodeInfoProto.newBuilder()
-      .setId(toProto((DatanodeID)dni))
-      .setCapacity(dni.getCapacity())
-      .setDfsUsed(dni.getDfsUsed())
-      .setRemaining(dni.getRemaining())
-      .setBlockPoolUsed(dni.getBlockPoolUsed())
-      .setLastUpdate(dni.getLastUpdate())
-      .setXceiverCount(dni.getXceiverCount())
-      .setLocation(dni.getNetworkLocation())
-      .setAdminState(HdfsProtos.DatanodeInfoProto.AdminState.valueOf(
-          dni.getAdminState().name()))
-      .build();
-  }
-
-  public static DatanodeInfo fromProto(HdfsProtos.DatanodeInfoProto dniProto) {
-    DatanodeInfo dniObj = new DatanodeInfo(fromProto(dniProto.getId()),
-        dniProto.getLocation());
-
-    dniObj.setCapacity(dniProto.getCapacity());
-    dniObj.setDfsUsed(dniProto.getDfsUsed());
-    dniObj.setRemaining(dniProto.getRemaining());
-    dniObj.setBlockPoolUsed(dniProto.getBlockPoolUsed());
-    dniObj.setLastUpdate(dniProto.getLastUpdate());
-    dniObj.setXceiverCount(dniProto.getXceiverCount());
-    dniObj.setAdminState(DatanodeInfo.AdminStates.valueOf(
-        dniProto.getAdminState().name()));
-    return dniObj;
-  }
-  
-  public static ArrayList<? extends HdfsProtos.DatanodeInfoProto> toProtos(
-      DatanodeInfo[] dnInfos, int startIdx) {
-    ArrayList<HdfsProtos.DatanodeInfoProto> protos =
-      Lists.newArrayListWithCapacity(dnInfos.length);
-    for (int i = startIdx; i < dnInfos.length; i++) {
-      protos.add(toProto(dnInfos[i]));
-    }
-    return protos;
-  }
-  
-  public static DatanodeInfo[] fromProtos(
-      List<HdfsProtos.DatanodeInfoProto> targetsList) {
-    DatanodeInfo[] ret = new DatanodeInfo[targetsList.size()];
-    int i = 0;
-    for (HdfsProtos.DatanodeInfoProto proto : targetsList) {
-      ret[i++] = fromProto(proto);
-    }
-    return ret;
-  }
-
-  public static DataChecksum.Type fromProto(HdfsProtos.ChecksumTypeProto type) {
-    return DataChecksum.Type.valueOf(type.getNumber());
-  }
-
-  public static HdfsProtos.ChecksumTypeProto toProto(DataChecksum.Type type) {
-    return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
-  }
-
-  public static InputStream vintPrefixed(final InputStream input)
-  throws IOException {
-    final int firstByte = input.read();
-    if (firstByte == -1) {
-      throw new EOFException("Premature EOF: no length prefix available");
-    }
-    
-    int size = CodedInputStream.readRawVarint32(firstByte, input);
-    assert size >= 0;
-  
-    return new ExactSizeInputStream(input, size);
-  }
-}

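Note (reviewer aside, not part of the patch): the vintPrefixed() helper deleted above lives on in PBHelper, and the wire framing is unchanged: a protobuf varint carrying the message length, followed by exactly that many bytes of message. A minimal sketch of the reader side, reusing the same classes as the removed code; the class and method names here are illustrative:

    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    import org.apache.hadoop.hdfs.util.ExactSizeInputStream;

    import com.google.protobuf.CodedInputStream;

    final class VintFraming {
      /** Wrap the next length-prefixed frame so parseFrom() cannot over-read. */
      static InputStream nextFrame(InputStream input) throws IOException {
        int firstByte = input.read();
        if (firstByte == -1) {
          throw new EOFException("Premature EOF: no length prefix available");
        }
        int size = CodedInputStream.readRawVarint32(firstByte, input);
        return new ExactSizeInputStream(input, size);
      }
    }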
+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer;
 
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
+import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.security.SaslInputStream;
 import org.apache.hadoop.security.SaslOutputStream;
 
+import com.google.common.base.Charsets;
 import com.google.common.collect.Maps;
 import com.google.protobuf.ByteString;
 
@@ -399,7 +400,7 @@ public class DataTransferEncryptor {
       DataEncryptionKey encryptionKey) {
     return encryptionKey.keyId + NAME_DELIMITER +
         encryptionKey.blockPoolId + NAME_DELIMITER +
-        new String(Base64.encodeBase64(encryptionKey.nonce, false));
+        new String(Base64.encodeBase64(encryptionKey.nonce, false), Charsets.UTF_8);
   }
   
   /**
@@ -427,7 +428,7 @@ public class DataTransferEncryptor {
   }
   
   private static char[] encryptionKeyToPassword(byte[] encryptionKey) {
-    return new String(Base64.encodeBase64(encryptionKey, false)).toCharArray();
+    return new String(Base64.encodeBase64(encryptionKey, false), Charsets.UTF_8).toCharArray();
   }
   
   /**

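Note (reviewer aside, not part of the patch): Base64-encoded bytes are plain ASCII, so decoding them with an explicit UTF-8 charset as above should produce the same string as before; the point of the change appears to be avoiding the platform default charset. A minimal sketch, assuming commons-codec's Base64 as used in the file above; the class name is illustrative:

    import org.apache.commons.codec.binary.Base64;

    import com.google.common.base.Charsets;

    final class KeyNames {
      static String encode(byte[] nonce) {
        // false = no chunking; charset is explicit instead of platform-default.
        return new String(Base64.encodeBase64(nonce, false), Charsets.UTF_8);
      }
    }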
+ 8 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java

@@ -21,12 +21,12 @@ package org.apache.hadoop.hdfs.protocol.datatransfer;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
@@ -41,23 +41,17 @@ import org.apache.hadoop.util.DataChecksum;
 public abstract class DataTransferProtoUtil {
   static BlockConstructionStage fromProto(
       OpWriteBlockProto.BlockConstructionStage stage) {
-    return BlockConstructionStage.valueOf(BlockConstructionStage.class,
-        stage.name());
+    return BlockConstructionStage.valueOf(stage.name());
   }
 
   static OpWriteBlockProto.BlockConstructionStage toProto(
       BlockConstructionStage stage) {
-    return OpWriteBlockProto.BlockConstructionStage.valueOf(
-        stage.name());
+    return OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name());
   }
 
   public static ChecksumProto toProto(DataChecksum checksum) {
-    ChecksumTypeProto type = HdfsProtoUtil.toProto(checksum.getChecksumType());
-    if (type == null) {
-      throw new IllegalArgumentException(
-          "Can't convert checksum to protobuf: " + checksum);
-    }
-
+    ChecksumTypeProto type = PBHelper.convert(checksum.getChecksumType());
+    // ChecksumType#valueOf never returns null
     return ChecksumProto.newBuilder()
       .setBytesPerChecksum(checksum.getBytesPerChecksum())
       .setType(type)
@@ -68,8 +62,7 @@ public abstract class DataTransferProtoUtil {
     if (proto == null) return null;
 
     int bytesPerChecksum = proto.getBytesPerChecksum();
-    DataChecksum.Type type = HdfsProtoUtil.fromProto(proto.getType());
-    
+    DataChecksum.Type type = PBHelper.convert(proto.getType());
     return DataChecksum.newDataChecksum(type, bytesPerChecksum);
   }
 
@@ -86,8 +79,8 @@ public abstract class DataTransferProtoUtil {
   static BaseHeaderProto buildBaseHeader(ExtendedBlock blk,
       Token<BlockTokenIdentifier> blockToken) {
     return BaseHeaderProto.newBuilder()
-      .setBlock(HdfsProtoUtil.toProto(blk))
-      .setToken(HdfsProtoUtil.toProto(blockToken))
+      .setBlock(PBHelper.convert(blk))
+      .setToken(PBHelper.convert(blockToken))
       .build();
   }
 }

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java

@@ -55,12 +55,15 @@ public interface DataTransferProtocol {
    * @param clientName client's name.
    * @param blockOffset offset of the block.
    * @param length maximum number of bytes for this read.
+   * @param sendChecksum if false, the DN should skip reading and sending
+   *        checksums
    */
   public void readBlock(final ExtendedBlock blk,
       final Token<BlockTokenIdentifier> blockToken,
       final String clientName,
       final long blockOffset,
-      final long length) throws IOException;
+      final long length,
+      final boolean sendChecksum) throws IOException;
 
   /**
    * Write a block to a datanode pipeline.

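Note (reviewer aside, not part of the patch): callers now opt in or out of checksums per read through the new flag; the block-reader changes above simply forward their verifyChecksum setting. A minimal sketch of a raw read request that skips checksums, assuming an already-connected DataOutputStream plus block, token and client name obtained as in the block readers; the wrapper class is hypothetical:

    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
    import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
    import org.apache.hadoop.security.token.Token;

    final class ChecksumFreeRead {
      /** Issue OP_READ_BLOCK for the whole block without checksums. */
      static void request(DataOutputStream out, ExtendedBlock block,
          Token<BlockTokenIdentifier> token, String clientName) throws IOException {
        new Sender(out).readBlock(block, token, clientName,
            0L, block.getNumBytes(), /* sendChecksum= */ false);
      }
    }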
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer;
 
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
+import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
 
 import java.io.IOException;
 import java.io.InputStream;

+ 20 - 20
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java

@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer;
 
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.fromProto;
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.fromProtos;
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
+import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
 import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.fromProto;
 
 import java.io.DataInputStream;
@@ -33,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 
 /** Receiver */
 @InterfaceAudience.Private
@@ -85,21 +84,22 @@ public abstract class Receiver implements DataTransferProtocol {
   /** Receive OP_READ_BLOCK */
   private void opReadBlock() throws IOException {
     OpReadBlockProto proto = OpReadBlockProto.parseFrom(vintPrefixed(in));
-    readBlock(fromProto(proto.getHeader().getBaseHeader().getBlock()),
-        fromProto(proto.getHeader().getBaseHeader().getToken()),
+    readBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
+        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
         proto.getHeader().getClientName(),
         proto.getOffset(),
-        proto.getLen());
+        proto.getLen(),
+        proto.getSendChecksums());
   }
   
   /** Receive OP_WRITE_BLOCK */
   private void opWriteBlock(DataInputStream in) throws IOException {
     final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
-    writeBlock(fromProto(proto.getHeader().getBaseHeader().getBlock()),
-        fromProto(proto.getHeader().getBaseHeader().getToken()),
+    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
+        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
         proto.getHeader().getClientName(),
-        fromProtos(proto.getTargetsList()),
-        fromProto(proto.getSource()),
+        PBHelper.convert(proto.getTargetsList()),
+        PBHelper.convert(proto.getSource()),
         fromProto(proto.getStage()),
         proto.getPipelineSize(),
         proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
@@ -111,33 +111,33 @@ public abstract class Receiver implements DataTransferProtocol {
   private void opTransferBlock(DataInputStream in) throws IOException {
     final OpTransferBlockProto proto =
       OpTransferBlockProto.parseFrom(vintPrefixed(in));
-    transferBlock(fromProto(proto.getHeader().getBaseHeader().getBlock()),
-        fromProto(proto.getHeader().getBaseHeader().getToken()),
+    transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
+        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
         proto.getHeader().getClientName(),
-        fromProtos(proto.getTargetsList()));
+        PBHelper.convert(proto.getTargetsList()));
   }
 
   /** Receive OP_REPLACE_BLOCK */
   private void opReplaceBlock(DataInputStream in) throws IOException {
     OpReplaceBlockProto proto = OpReplaceBlockProto.parseFrom(vintPrefixed(in));
-    replaceBlock(fromProto(proto.getHeader().getBlock()),
-        fromProto(proto.getHeader().getToken()),
+    replaceBlock(PBHelper.convert(proto.getHeader().getBlock()),
+        PBHelper.convert(proto.getHeader().getToken()),
         proto.getDelHint(),
-        fromProto(proto.getSource()));
+        PBHelper.convert(proto.getSource()));
   }
 
   /** Receive OP_COPY_BLOCK */
   private void opCopyBlock(DataInputStream in) throws IOException {
     OpCopyBlockProto proto = OpCopyBlockProto.parseFrom(vintPrefixed(in));
-    copyBlock(fromProto(proto.getHeader().getBlock()),
-        fromProto(proto.getHeader().getToken()));
+    copyBlock(PBHelper.convert(proto.getHeader().getBlock()),
+        PBHelper.convert(proto.getHeader().getToken()));
   }
 
   /** Receive OP_BLOCK_CHECKSUM */
   private void opBlockChecksum(DataInputStream in) throws IOException {
     OpBlockChecksumProto proto = OpBlockChecksumProto.parseFrom(vintPrefixed(in));
     
-    blockChecksum(fromProto(proto.getHeader().getBlock()),
-        fromProto(proto.getHeader().getToken()));
+    blockChecksum(PBHelper.convert(proto.getHeader().getBlock()),
+        PBHelper.convert(proto.getHeader().getToken()));
   }
 }

+ 12 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer;
 
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.toProto;
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.toProtos;
 import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.toProto;
 
 import java.io.DataOutput;
@@ -37,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
@@ -63,6 +62,10 @@ public class Sender implements DataTransferProtocol {
 
   private static void send(final DataOutputStream out, final Op opcode,
       final Message proto) throws IOException {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Sending DataTransferOp " + proto.getClass().getSimpleName()
+          + ": " + proto);
+    }
     op(out, opcode);
     proto.writeDelimitedTo(out);
     out.flush();
@@ -73,12 +76,14 @@ public class Sender implements DataTransferProtocol {
       final Token<BlockTokenIdentifier> blockToken,
       final String clientName,
       final long blockOffset,
-      final long length) throws IOException {
+      final long length,
+      final boolean sendChecksum) throws IOException {
 
     OpReadBlockProto proto = OpReadBlockProto.newBuilder()
       .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken))
       .setOffset(blockOffset)
      .setLen(length)
+      .setSendChecksums(sendChecksum)
       .build();
 
     send(out, Op.READ_BLOCK, proto);
@@ -105,7 +110,7 @@ public class Sender implements DataTransferProtocol {
 
     OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
       .setHeader(header)
-      .addAllTargets(toProtos(targets, 1))
+      .addAllTargets(PBHelper.convert(targets, 1))
       .setStage(toProto(stage))
       .setPipelineSize(pipelineSize)
       .setMinBytesRcvd(minBytesRcvd)
@@ -114,7 +119,7 @@ public class Sender implements DataTransferProtocol {
       .setRequestedChecksum(checksumProto);
     
     if (source != null) {
-      proto.setSource(toProto(source));
+      proto.setSource(PBHelper.convertDatanodeInfo(source));
     }
 
     send(out, Op.WRITE_BLOCK, proto.build());
@@ -129,7 +134,7 @@ public class Sender implements DataTransferProtocol {
     OpTransferBlockProto proto = OpTransferBlockProto.newBuilder()
       .setHeader(DataTransferProtoUtil.buildClientHeader(
           blk, clientName, blockToken))
-      .addAllTargets(toProtos(targets, 0))
+      .addAllTargets(PBHelper.convert(targets))
       .build();
 
     send(out, Op.TRANSFER_BLOCK, proto);
@@ -143,7 +148,7 @@ public class Sender implements DataTransferProtocol {
     OpReplaceBlockProto proto = OpReplaceBlockProto.newBuilder()
       .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
       .setDelHint(delHint)
-      .setSource(toProto(source))
+      .setSource(PBHelper.convertDatanodeInfo(source))
       .build();
     
     send(out, Op.REPLACE_BLOCK, proto);

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java

@@ -37,9 +37,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetRep
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
 
 import com.google.protobuf.ByteString;
@@ -133,7 +133,7 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
       }
       List<Token<BlockTokenIdentifier>> tokens = 
           new ArrayList<Token<BlockTokenIdentifier>>(request.getTokensCount());
-      for (BlockTokenIdentifierProto b : request.getTokensList()) {
+      for (TokenProto b : request.getTokensList()) {
         tokens.add(PBHelper.convert(b));
       }
       // Call the real implementation

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java

@@ -44,7 +44,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdf
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.ipc.ProtobufHelper;
@@ -55,6 +54,7 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
 
 import com.google.protobuf.ByteString;
@@ -77,7 +77,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
   /** RpcController is not used and hence is set to null */
   private final static RpcController NULL_CONTROLLER = null;
   private final ClientDatanodeProtocolPB rpcProxy;
-  private final static RefreshNamenodesRequestProto REFRESH_NAMENODES = 
+  private final static RefreshNamenodesRequestProto VOID_REFRESH_NAMENODES = 
       RefreshNamenodesRequestProto.newBuilder().build();
 
   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
@@ -170,7 +170,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
   @Override
   public void refreshNamenodes() throws IOException {
     try {
-      rpcProxy.refreshNamenodes(NULL_CONTROLLER, REFRESH_NAMENODES);
+      rpcProxy.refreshNamenodes(NULL_CONTROLLER, VOID_REFRESH_NAMENODES);
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -222,8 +222,8 @@ public class ClientDatanodeProtocolTranslatorPB implements
     // Convert to proto objects
     List<ExtendedBlockProto> blocksProtos = 
         new ArrayList<ExtendedBlockProto>(blocks.size());
-    List<BlockTokenIdentifierProto> tokensProtos = 
-        new ArrayList<BlockTokenIdentifierProto>(tokens.size());
+    List<TokenProto> tokensProtos = 
+        new ArrayList<TokenProto>(tokens.size());
     for (ExtendedBlock b : blocks) {
       blocksProtos.add(PBHelper.convert(b));
     }

+ 107 - 92
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.protocolPB;
 
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -38,8 +37,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlo
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
@@ -65,8 +62,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCon
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
@@ -95,8 +90,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto;
@@ -125,12 +118,19 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
+import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
+import org.apache.hadoop.security.token.Token;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -148,6 +148,78 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     ClientNamenodeProtocolPB {
   final private ClientProtocol server;
 
+  private static final CreateResponseProto VOID_CREATE_RESPONSE = 
+  CreateResponseProto.newBuilder().build();
+
+  private static final AppendResponseProto VOID_APPEND_RESPONSE = 
+  AppendResponseProto.newBuilder().build();
+
+  private static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE = 
+  SetPermissionResponseProto.newBuilder().build();
+
+  private static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE = 
+  SetOwnerResponseProto.newBuilder().build();
+
+  private static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE = 
+  AbandonBlockResponseProto.newBuilder().build();
+
+  private static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE = 
+  ReportBadBlocksResponseProto.newBuilder().build();
+
+  private static final ConcatResponseProto VOID_CONCAT_RESPONSE = 
+  ConcatResponseProto.newBuilder().build();
+
+  private static final Rename2ResponseProto VOID_RENAME2_RESPONSE = 
+  Rename2ResponseProto.newBuilder().build();
+
+  private static final GetListingResponseProto VOID_GETLISTING_RESPONSE = 
+  GetListingResponseProto.newBuilder().build();
+
+  private static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE = 
+  RenewLeaseResponseProto.newBuilder().build();
+
+  private static final SaveNamespaceResponseProto VOID_SAVENAMESPACE_RESPONSE = 
+  SaveNamespaceResponseProto.newBuilder().build();
+
+  private static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE = 
+  RefreshNodesResponseProto.newBuilder().build();
+
+  private static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE = 
+  FinalizeUpgradeResponseProto.newBuilder().build();
+
+  private static final MetaSaveResponseProto VOID_METASAVE_RESPONSE = 
+  MetaSaveResponseProto.newBuilder().build();
+
+  private static final GetFileInfoResponseProto VOID_GETFILEINFO_RESPONSE = 
+  GetFileInfoResponseProto.newBuilder().build();
+
+  private static final GetFileLinkInfoResponseProto VOID_GETFILELINKINFO_RESPONSE = 
+  GetFileLinkInfoResponseProto.newBuilder().build();
+
+  private static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE = 
+  SetQuotaResponseProto.newBuilder().build();
+
+  private static final FsyncResponseProto VOID_FSYNC_RESPONSE = 
+  FsyncResponseProto.newBuilder().build();
+
+  private static final SetTimesResponseProto VOID_SETTIMES_RESPONSE = 
+  SetTimesResponseProto.newBuilder().build();
+
+  private static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE = 
+  CreateSymlinkResponseProto.newBuilder().build();
+
+  private static final UpdatePipelineResponseProto
+    VOID_UPDATEPIPELINE_RESPONSE = 
+  UpdatePipelineResponseProto.newBuilder().build();
+
+  private static final CancelDelegationTokenResponseProto 
+      VOID_CANCELDELEGATIONTOKEN_RESPONSE = 
+          CancelDelegationTokenResponseProto.newBuilder().build();
+
+  private static final SetBalancerBandwidthResponseProto 
+      VOID_SETBALANCERBANDWIDTH_RESPONSE = 
+        SetBalancerBandwidthResponseProto.newBuilder().build();
+
   /**
    * Constructor
    * 
@@ -192,9 +264,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   
-  static final CreateResponseProto VOID_CREATE_RESPONSE = 
-      CreateResponseProto.newBuilder().build();
-  
   @Override
   public CreateResponseProto create(RpcController controller,
       CreateRequestProto req) throws ServiceException {
@@ -209,9 +278,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     return VOID_CREATE_RESPONSE;
   }
   
-  static final AppendResponseProto NULL_APPEND_RESPONSE = 
-      AppendResponseProto.newBuilder().build();
-  
   @Override
   public AppendResponseProto append(RpcController controller,
       AppendRequestProto req) throws ServiceException {
@@ -221,7 +287,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
         return AppendResponseProto.newBuilder()
             .setBlock(PBHelper.convert(result)).build();
       }
-      return NULL_APPEND_RESPONSE;
+      return VOID_APPEND_RESPONSE;
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -240,9 +306,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
 
-  static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE = 
-      SetPermissionResponseProto.newBuilder().build();
-
   @Override
   public SetPermissionResponseProto setPermission(RpcController controller,
       SetPermissionRequestProto req) throws ServiceException {
@@ -254,9 +317,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     return VOID_SET_PERM_RESPONSE;
   }
 
-  static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE = 
-      SetOwnerResponseProto.newBuilder().build();
-
   @Override
   public SetOwnerResponseProto setOwner(RpcController controller,
       SetOwnerRequestProto req) throws ServiceException {
@@ -270,9 +330,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     return VOID_SET_OWNER_RESPONSE;
   }
 
-  static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE = 
-      AbandonBlockResponseProto.newBuilder().build();
-
   @Override
   @Override
   public AbandonBlockResponseProto abandonBlock(RpcController controller,
   public AbandonBlockResponseProto abandonBlock(RpcController controller,
       AbandonBlockRequestProto req) throws ServiceException {
       AbandonBlockRequestProto req) throws ServiceException {
@@ -338,9 +395,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
   
   
-  static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE = 
-      ReportBadBlocksResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
   public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
       ReportBadBlocksRequestProto req) throws ServiceException {
       ReportBadBlocksRequestProto req) throws ServiceException {
@@ -354,9 +408,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     return VOID_REP_BAD_BLOCK_RESPONSE;
     return VOID_REP_BAD_BLOCK_RESPONSE;
   }
   }
 
 
-  static final ConcatResponseProto VOID_CONCAT_RESPONSE = 
-      ConcatResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public ConcatResponseProto concat(RpcController controller,
   public ConcatResponseProto concat(RpcController controller,
       ConcatRequestProto req) throws ServiceException {
       ConcatRequestProto req) throws ServiceException {
@@ -380,9 +431,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final Rename2ResponseProto VOID_RENAME2_RESPONSE = 
-      Rename2ResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public Rename2ResponseProto rename2(RpcController controller,
   public Rename2ResponseProto rename2(RpcController controller,
       Rename2RequestProto req) throws ServiceException {
       Rename2RequestProto req) throws ServiceException {
@@ -419,8 +467,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final GetListingResponseProto NULL_GETLISTING_RESPONSE = 
-      GetListingResponseProto.newBuilder().build();
   @Override
   @Override
   public GetListingResponseProto getListing(RpcController controller,
   public GetListingResponseProto getListing(RpcController controller,
       GetListingRequestProto req) throws ServiceException {
       GetListingRequestProto req) throws ServiceException {
@@ -432,16 +478,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
         return GetListingResponseProto.newBuilder().setDirList(
         return GetListingResponseProto.newBuilder().setDirList(
           PBHelper.convert(result)).build();
           PBHelper.convert(result)).build();
       } else {
       } else {
-        return NULL_GETLISTING_RESPONSE;
+        return VOID_GETLISTING_RESPONSE;
       }
       }
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
   }
   }
   
   
-  static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE = 
-      RenewLeaseResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public RenewLeaseResponseProto renewLease(RpcController controller,
   public RenewLeaseResponseProto renewLease(RpcController controller,
       RenewLeaseRequestProto req) throws ServiceException {
       RenewLeaseRequestProto req) throws ServiceException {
@@ -492,10 +535,10 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, GetDatanodeReportRequestProto req)
       RpcController controller, GetDatanodeReportRequestProto req)
       throws ServiceException {
       throws ServiceException {
     try {
     try {
-      DatanodeInfoProto[] result = PBHelper.convert(server
+      List<? extends DatanodeInfoProto> result = PBHelper.convert(server
           .getDatanodeReport(PBHelper.convert(req.getType())));
           .getDatanodeReport(PBHelper.convert(req.getType())));
       return GetDatanodeReportResponseProto.newBuilder()
       return GetDatanodeReportResponseProto.newBuilder()
-          .addAllDi(Arrays.asList(result)).build();
+          .addAllDi(result).build();
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
@@ -526,9 +569,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
   
   
-  static final SaveNamespaceResponseProto VOID_SAVENAMESPACE_RESPONSE = 
-      SaveNamespaceResponseProto.newBuilder().build();
-
   @Override
   @Override
   public SaveNamespaceResponseProto saveNamespace(RpcController controller,
   public SaveNamespaceResponseProto saveNamespace(RpcController controller,
       SaveNamespaceRequestProto req) throws ServiceException {
       SaveNamespaceRequestProto req) throws ServiceException {
@@ -555,9 +595,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
   }
 
 
 
 
-  static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE = 
-      RefreshNodesResponseProto.newBuilder().build();
-
   @Override
   @Override
   public RefreshNodesResponseProto refreshNodes(RpcController controller,
   public RefreshNodesResponseProto refreshNodes(RpcController controller,
       RefreshNodesRequestProto req) throws ServiceException {
       RefreshNodesRequestProto req) throws ServiceException {
@@ -570,9 +607,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
 
 
   }
   }
 
 
-  static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE = 
-      FinalizeUpgradeResponseProto.newBuilder().build();
-
   @Override
   @Override
   public FinalizeUpgradeResponseProto finalizeUpgrade(RpcController controller,
   public FinalizeUpgradeResponseProto finalizeUpgrade(RpcController controller,
       FinalizeUpgradeRequestProto req) throws ServiceException {
       FinalizeUpgradeRequestProto req) throws ServiceException {
@@ -599,9 +633,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final MetaSaveResponseProto VOID_METASAVE_RESPONSE = 
-      MetaSaveResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public MetaSaveResponseProto metaSave(RpcController controller,
   public MetaSaveResponseProto metaSave(RpcController controller,
       MetaSaveRequestProto req) throws ServiceException {
       MetaSaveRequestProto req) throws ServiceException {
@@ -614,8 +645,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
 
 
   }
   }
 
 
-  static final GetFileInfoResponseProto NULL_GETFILEINFO_RESPONSE = 
-      GetFileInfoResponseProto.newBuilder().build();
   @Override
   @Override
   public GetFileInfoResponseProto getFileInfo(RpcController controller,
   public GetFileInfoResponseProto getFileInfo(RpcController controller,
       GetFileInfoRequestProto req) throws ServiceException {
       GetFileInfoRequestProto req) throws ServiceException {
@@ -626,14 +655,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
         return GetFileInfoResponseProto.newBuilder().setFs(
         return GetFileInfoResponseProto.newBuilder().setFs(
             PBHelper.convert(result)).build();
             PBHelper.convert(result)).build();
       }
       }
-      return NULL_GETFILEINFO_RESPONSE;      
+      return VOID_GETFILEINFO_RESPONSE;      
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
   }
   }
 
 
-  static final GetFileLinkInfoResponseProto NULL_GETFILELINKINFO_RESPONSE = 
-      GetFileLinkInfoResponseProto.newBuilder().build();
   @Override
   @Override
   public GetFileLinkInfoResponseProto getFileLinkInfo(RpcController controller,
   public GetFileLinkInfoResponseProto getFileLinkInfo(RpcController controller,
       GetFileLinkInfoRequestProto req) throws ServiceException {
       GetFileLinkInfoRequestProto req) throws ServiceException {
@@ -645,7 +672,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
             PBHelper.convert(result)).build();
             PBHelper.convert(result)).build();
       } else {
       } else {
         System.out.println("got  null result for getFileLinkInfo for " + req.getSrc());
         System.out.println("got  null result for getFileLinkInfo for " + req.getSrc());
-        return NULL_GETFILELINKINFO_RESPONSE;      
+        return VOID_GETFILELINKINFO_RESPONSE;      
       }
       }
 
 
     } catch (IOException e) {
     } catch (IOException e) {
@@ -666,9 +693,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
   
   
-  static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE = 
-      SetQuotaResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public SetQuotaResponseProto setQuota(RpcController controller,
   public SetQuotaResponseProto setQuota(RpcController controller,
       SetQuotaRequestProto req) throws ServiceException {
       SetQuotaRequestProto req) throws ServiceException {
@@ -681,9 +705,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
   
   
-  static final FsyncResponseProto VOID_FSYNC_RESPONSE = 
-      FsyncResponseProto.newBuilder().build();
-
   @Override
   @Override
   public FsyncResponseProto fsync(RpcController controller,
   public FsyncResponseProto fsync(RpcController controller,
       FsyncRequestProto req) throws ServiceException {
       FsyncRequestProto req) throws ServiceException {
@@ -695,9 +716,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final SetTimesResponseProto VOID_SETTIMES_RESPONSE = 
-      SetTimesResponseProto.newBuilder().build();
-
   @Override
   @Override
   public SetTimesResponseProto setTimes(RpcController controller,
   public SetTimesResponseProto setTimes(RpcController controller,
       SetTimesRequestProto req) throws ServiceException {
       SetTimesRequestProto req) throws ServiceException {
@@ -709,9 +727,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE = 
-      CreateSymlinkResponseProto.newBuilder().build();
-
   @Override
   @Override
   public CreateSymlinkResponseProto createSymlink(RpcController controller,
   public CreateSymlinkResponseProto createSymlink(RpcController controller,
       CreateSymlinkRequestProto req) throws ServiceException {
       CreateSymlinkRequestProto req) throws ServiceException {
@@ -729,8 +744,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       GetLinkTargetRequestProto req) throws ServiceException {
     try {
       String result = server.getLinkTarget(req.getPath());
-      return GetLinkTargetResponseProto.newBuilder().setTargetPath(result)
-          .build();
+      GetLinkTargetResponseProto.Builder builder = GetLinkTargetResponseProto
+          .newBuilder();
+      if (result != null) {
+        builder.setTargetPath(result);
+      }
+      return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -751,9 +770,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final UpdatePipelineResponseProto VOID_UPDATEPIPELINE_RESPONSE = 
-      UpdatePipelineResponseProto.newBuilder().build();
-
   @Override
   @Override
   public UpdatePipelineResponseProto updatePipeline(RpcController controller,
   public UpdatePipelineResponseProto updatePipeline(RpcController controller,
       UpdatePipelineRequestProto req) throws ServiceException {
       UpdatePipelineRequestProto req) throws ServiceException {
@@ -774,10 +790,14 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, GetDelegationTokenRequestProto req)
       throws ServiceException {
     try {
-      BlockTokenIdentifierProto result = PBHelper.convert(server
-          .getDelegationToken(new Text(req.getRenewer())));
-      return GetDelegationTokenResponseProto.newBuilder().setToken(result)
-          .build();
+      Token<DelegationTokenIdentifier> token = server
+          .getDelegationToken(new Text(req.getRenewer()));
+      GetDelegationTokenResponseProto.Builder rspBuilder = 
+          GetDelegationTokenResponseProto.newBuilder();
+      if (token != null) {
+        rspBuilder.setToken(PBHelper.convert(token));
+      }
+      return rspBuilder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -791,16 +811,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       long result = server.renewDelegationToken(PBHelper
       long result = server.renewDelegationToken(PBHelper
           .convertDelegationToken(req.getToken()));
           .convertDelegationToken(req.getToken()));
       return RenewDelegationTokenResponseProto.newBuilder()
       return RenewDelegationTokenResponseProto.newBuilder()
-          .setNewExireTime(result).build();
+          .setNewExpiryTime(result).build();
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
   }
   }
 
 
-  static final CancelDelegationTokenResponseProto 
-      VOID_CANCELDELEGATIONTOKEN_RESPONSE = 
-      CancelDelegationTokenResponseProto.newBuilder().build();
-  
   @Override
   @Override
   public CancelDelegationTokenResponseProto cancelDelegationToken(
   public CancelDelegationTokenResponseProto cancelDelegationToken(
       RpcController controller, CancelDelegationTokenRequestProto req)
       RpcController controller, CancelDelegationTokenRequestProto req)
@@ -814,10 +830,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     }
   }
   }
 
 
-  static final SetBalancerBandwidthResponseProto 
-    VOID_SETBALANCERBANDWIDTH_RESPONSE = 
-      SetBalancerBandwidthResponseProto.newBuilder().build();
-
   @Override
   @Override
   public SetBalancerBandwidthResponseProto setBalancerBandwidth(
   public SetBalancerBandwidthResponseProto setBalancerBandwidth(
       RpcController controller, SetBalancerBandwidthRequestProto req)
       RpcController controller, SetBalancerBandwidthRequestProto req)
@@ -835,10 +847,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, GetDataEncryptionKeyRequestProto request)
       throws ServiceException {
     try {
+      GetDataEncryptionKeyResponseProto.Builder builder = 
+          GetDataEncryptionKeyResponseProto.newBuilder();
       DataEncryptionKey encryptionKey = server.getDataEncryptionKey();
-      return GetDataEncryptionKeyResponseProto.newBuilder()
-          .setDataEncryptionKey(PBHelper.convert(encryptionKey))
-          .build();
+      if (encryptionKey != null) {
+        builder.setDataEncryptionKey(PBHelper.convert(encryptionKey));
+      }
+      return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
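The recurring change in ClientNamenodeProtocolServerSideTranslatorPB is twofold: the empty "void" responses become static finals that are built once and reused (protobuf messages are immutable, so a single instance can serve every call), and optional response fields are set only when the underlying ClientProtocol call returned a value, so a null result leaves the field unset. A minimal sketch of that shape, not part of the patch, assuming the generated GetLinkTargetRequestProto/GetLinkTargetResponseProto from this module; the server call is a hypothetical stub:

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;

import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

class LinkTargetServerSketch {
  // Built once; returned unchanged whenever the target path is absent.
  private static final GetLinkTargetResponseProto VOID_GET_LINK_TARGET_RESPONSE =
      GetLinkTargetResponseProto.newBuilder().build();

  // Hypothetical stand-in for the ClientProtocol server field of the real translator.
  private String lookupLinkTarget(String path) throws IOException {
    return null; // stub
  }

  public GetLinkTargetResponseProto getLinkTarget(RpcController controller,
      GetLinkTargetRequestProto req) throws ServiceException {
    try {
      String result = lookupLinkTarget(req.getPath());
      if (result == null) {
        return VOID_GET_LINK_TARGET_RESPONSE; // optional targetPath stays unset
      }
      return GetLinkTargetResponseProto.newBuilder()
          .setTargetPath(result).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }
}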

+ 50 - 25
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -51,7 +51,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Abando
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
@@ -64,14 +63,15 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDat
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
@@ -83,7 +83,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Recove
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto;
@@ -110,6 +109,10 @@ import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
+import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
+import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
 
 
 import com.google.protobuf.ByteString;
 import com.google.protobuf.ByteString;
@@ -126,6 +129,29 @@ public class ClientNamenodeProtocolTranslatorPB implements
     ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator {
   final private ClientNamenodeProtocolPB rpcProxy;
 
+  static final GetServerDefaultsRequestProto VOID_GET_SERVER_DEFAULT_REQUEST = 
+  GetServerDefaultsRequestProto.newBuilder().build();
+
+  private final static GetFsStatusRequestProto VOID_GET_FSSTATUS_REQUEST =
+  GetFsStatusRequestProto.newBuilder().build();
+
+  private final static SaveNamespaceRequestProto VOID_SAVE_NAMESPACE_REQUEST =
+  SaveNamespaceRequestProto.newBuilder().build();
+
+  private final static RollEditsRequestProto VOID_ROLLEDITS_REQUEST = 
+  RollEditsRequestProto.getDefaultInstance();
+
+  private final static RefreshNodesRequestProto VOID_REFRESH_NODES_REQUEST =
+  RefreshNodesRequestProto.newBuilder().build();
+
+  private final static FinalizeUpgradeRequestProto
+  VOID_FINALIZE_UPGRADE_REQUEST =
+      FinalizeUpgradeRequestProto.newBuilder().build();
+
+  private final static GetDataEncryptionKeyRequestProto
+  VOID_GET_DATA_ENCRYPTIONKEY_REQUEST =
+      GetDataEncryptionKeyRequestProto.newBuilder().build();
+
   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
     rpcProxy = proxy;
   }
@@ -157,7 +183,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
 
 
   @Override
   @Override
   public FsServerDefaults getServerDefaults() throws IOException {
   public FsServerDefaults getServerDefaults() throws IOException {
-    GetServerDefaultsRequestProto req = GetServerDefaultsRequestProto.newBuilder().build();
+    GetServerDefaultsRequestProto req = VOID_GET_SERVER_DEFAULT_REQUEST;
     try {
     try {
       return PBHelper
       return PBHelper
           .convert(rpcProxy.getServerDefaults(null, req).getServerDefaults());
           .convert(rpcProxy.getServerDefaults(null, req).getServerDefaults());
@@ -280,7 +306,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     if (previous != null) 
     if (previous != null) 
       req.setPrevious(PBHelper.convert(previous)); 
       req.setPrevious(PBHelper.convert(previous)); 
     if (excludeNodes != null) 
     if (excludeNodes != null) 
-      req.addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes)));
+      req.addAllExcludeNodes(PBHelper.convert(excludeNodes));
     try {
     try {
       return PBHelper.convert(rpcProxy.addBlock(null, req.build()).getBlock());
       return PBHelper.convert(rpcProxy.addBlock(null, req.build()).getBlock());
     } catch (ServiceException e) {
     } catch (ServiceException e) {
@@ -298,8 +324,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
         .newBuilder()
         .newBuilder()
         .setSrc(src)
         .setSrc(src)
         .setBlk(PBHelper.convert(blk))
         .setBlk(PBHelper.convert(blk))
-        .addAllExistings(Arrays.asList(PBHelper.convert(existings)))
-        .addAllExcludes(Arrays.asList(PBHelper.convert(excludes)))
+        .addAllExistings(PBHelper.convert(existings))
+        .addAllExcludes(PBHelper.convert(excludes))
         .setNumAdditionalNodes(numAdditionalNodes)
         .setNumAdditionalNodes(numAdditionalNodes)
         .setClientName(clientName)
         .setClientName(clientName)
         .build();
         .build();
@@ -470,9 +496,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
 
 
   @Override
   @Override
   public long[] getStats() throws IOException {
   public long[] getStats() throws IOException {
-    GetFsStatusRequestProto req = GetFsStatusRequestProto.newBuilder().build();
     try {
     try {
-      return PBHelper.convert(rpcProxy.getFsStats(null, req));
+      return PBHelper.convert(rpcProxy.getFsStats(null,
+          VOID_GET_FSSTATUS_REQUEST));
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -519,10 +545,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
 
 
   @Override
   @Override
   public void saveNamespace() throws AccessControlException, IOException {
   public void saveNamespace() throws AccessControlException, IOException {
-    SaveNamespaceRequestProto req = SaveNamespaceRequestProto.newBuilder()
-        .build();
     try {
     try {
-      rpcProxy.saveNamespace(null, req);
+      rpcProxy.saveNamespace(null, VOID_SAVE_NAMESPACE_REQUEST);
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -530,9 +554,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
   
   
   @Override
   @Override
   public long rollEdits() throws AccessControlException, IOException {
   public long rollEdits() throws AccessControlException, IOException {
-    RollEditsRequestProto req = RollEditsRequestProto.getDefaultInstance();
     try {
     try {
-      RollEditsResponseProto resp = rpcProxy.rollEdits(null, req);
+      RollEditsResponseProto resp = rpcProxy.rollEdits(null,
+          VOID_ROLLEDITS_REQUEST);
       return resp.getNewSegmentTxId();
       return resp.getNewSegmentTxId();
     } catch (ServiceException se) {
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
       throw ProtobufHelper.getRemoteException(se);
@@ -554,9 +578,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
 
 
   @Override
   @Override
   public void refreshNodes() throws IOException {
   public void refreshNodes() throws IOException {
-    RefreshNodesRequestProto req = RefreshNodesRequestProto.newBuilder().build();
     try {
     try {
-      rpcProxy.refreshNodes(null, req);
+      rpcProxy.refreshNodes(null, VOID_REFRESH_NODES_REQUEST);
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -564,9 +587,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
 
 
   @Override
   @Override
   public void finalizeUpgrade() throws IOException {
   public void finalizeUpgrade() throws IOException {
-    FinalizeUpgradeRequestProto req = FinalizeUpgradeRequestProto.newBuilder().build();
     try {
     try {
-      rpcProxy.finalizeUpgrade(null, req);
+      rpcProxy.finalizeUpgrade(null, VOID_FINALIZE_UPGRADE_REQUEST);
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -712,7 +734,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
     GetLinkTargetRequestProto req = GetLinkTargetRequestProto.newBuilder()
         .setPath(path).build();
     try {
-      return rpcProxy.getLinkTarget(null, req).getTargetPath();
+      GetLinkTargetResponseProto rsp = rpcProxy.getLinkTarget(null, req);
+      return rsp.hasTargetPath() ? rsp.getTargetPath() : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -758,7 +781,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
         .setRenewer(renewer.toString())
         .build();
     try {
-      return PBHelper.convertDelegationToken(rpcProxy.getDelegationToken(null, req).getToken());
+      GetDelegationTokenResponseProto resp = rpcProxy.getDelegationToken(null, req);
+      return resp.hasToken() ? PBHelper.convertDelegationToken(resp.getToken())
+          : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -771,7 +796,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
         setToken(PBHelper.convert(token)).
         setToken(PBHelper.convert(token)).
         build();
         build();
     try {
     try {
-      return rpcProxy.renewDelegationToken(null, req).getNewExireTime();
+      return rpcProxy.renewDelegationToken(null, req).getNewExpiryTime();
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -812,11 +837,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
   
   
   @Override
   public DataEncryptionKey getDataEncryptionKey() throws IOException {
-    GetDataEncryptionKeyRequestProto req = GetDataEncryptionKeyRequestProto
-        .newBuilder().build();
     try {
-      return PBHelper.convert(rpcProxy.getDataEncryptionKey(null, req)
-          .getDataEncryptionKey());
+      GetDataEncryptionKeyResponseProto rsp = rpcProxy.getDataEncryptionKey(
+          null, VOID_GET_DATA_ENCRYPTIONKEY_REQUEST);
+     return rsp.hasDataEncryptionKey() ? 
+          PBHelper.convert(rsp.getDataEncryptionKey()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
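On the client side the same RPCs are unwrapped with a presence check: hasTargetPath(), hasToken() and hasDataEncryptionKey() decide whether to convert or to hand null back to the ClientProtocol caller, and parameterless requests are shared VOID_* constants. A small sketch of the presence check, not part of the patch, using the same generated classes with the proxy call stubbed out:

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;

class LinkTargetClientSketch {
  // Hypothetical stand-in for rpcProxy.getLinkTarget(null, req); returns an empty response.
  private GetLinkTargetResponseProto callProxy(GetLinkTargetRequestProto req) {
    return GetLinkTargetResponseProto.getDefaultInstance();
  }

  public String getLinkTarget(String path) {
    GetLinkTargetRequestProto req = GetLinkTargetRequestProto.newBuilder()
        .setPath(path).build();
    GetLinkTargetResponseProto rsp = callProxy(req);
    // Without the hasTargetPath() check an unset optional string would decode
    // as "", silently breaking callers that expect null for "no target".
    return rsp.hasTargetPath() ? rsp.getTargetPath() : null;
  }
}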

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java

@@ -84,7 +84,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
   
   
   /** RpcController is not used and hence is set to null */
   private final DatanodeProtocolPB rpcProxy;
-  private static final VersionRequestProto VERSION_REQUEST = 
+  private static final VersionRequestProto VOID_VERSION_REQUEST = 
       VersionRequestProto.newBuilder().build();
   private final static RpcController NULL_CONTROLLER = null;
   
@@ -243,7 +243,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
   public NamespaceInfo versionRequest() throws IOException {
     try {
       return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
-          VERSION_REQUEST).getInfo());
+          VOID_VERSION_REQUEST).getInfo());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }

+ 12 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java

@@ -62,15 +62,17 @@ public class DatanodeProtocolServerSideTranslatorPB implements
     DatanodeProtocolPB {
     DatanodeProtocolPB {
 
 
   private final DatanodeProtocol impl;
   private final DatanodeProtocol impl;
-  private static final ErrorReportResponseProto ERROR_REPORT_RESPONSE_PROTO = 
-      ErrorReportResponseProto.newBuilder().build();
+  private static final ErrorReportResponseProto
+      VOID_ERROR_REPORT_RESPONSE_PROTO = 
+          ErrorReportResponseProto.newBuilder().build();
   private static final BlockReceivedAndDeletedResponseProto 
   private static final BlockReceivedAndDeletedResponseProto 
-      BLOCK_RECEIVED_AND_DELETE_RESPONSE = 
+      VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE = 
           BlockReceivedAndDeletedResponseProto.newBuilder().build();
           BlockReceivedAndDeletedResponseProto.newBuilder().build();
-  private static final ReportBadBlocksResponseProto REPORT_BAD_BLOCK_RESPONSE = 
-      ReportBadBlocksResponseProto.newBuilder().build();
+  private static final ReportBadBlocksResponseProto
+      VOID_REPORT_BAD_BLOCK_RESPONSE = 
+          ReportBadBlocksResponseProto.newBuilder().build();
   private static final CommitBlockSynchronizationResponseProto 
   private static final CommitBlockSynchronizationResponseProto 
-      COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
+      VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
           CommitBlockSynchronizationResponseProto.newBuilder().build();
           CommitBlockSynchronizationResponseProto.newBuilder().build();
 
 
   public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl) {
   public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl) {
@@ -180,7 +182,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
-    return BLOCK_RECEIVED_AND_DELETE_RESPONSE;
+    return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
   }
   }
 
 
   @Override
   @Override
@@ -192,7 +194,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
-    return ERROR_REPORT_RESPONSE_PROTO;
+    return VOID_ERROR_REPORT_RESPONSE_PROTO;
   }
   }
 
 
   @Override
   @Override
@@ -221,7 +223,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
-    return REPORT_BAD_BLOCK_RESPONSE;
+    return VOID_REPORT_BAD_BLOCK_RESPONSE;
   }
   }
 
 
   @Override
   @Override
@@ -242,6 +244,6 @@ public class DatanodeProtocolServerSideTranslatorPB implements
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
-    return COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
+    return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
   }
   }
 }
 }
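The renames in DatanodeProtocolServerSideTranslatorPB only touch naming, but they fix the convention used throughout this patch: every pre-built empty request or response constant carries a VOID_ prefix. Either construction below yields an empty, immutable message that is safe to share across calls; RollEditsRequestProto in the client translator uses the getDefaultInstance() shortcut. Sketch only, assuming HdfsProtos.VersionRequestProto from this module:

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;

class VoidMessageSketch {
  // Explicit empty builder, the form most constants in this patch use.
  private static final VersionRequestProto VOID_VERSION_REQUEST =
      VersionRequestProto.newBuilder().build();

  // Generated shortcut for an equivalent empty, immutable message.
  private static final VersionRequestProto VOID_VERSION_REQUEST_ALT =
      VersionRequestProto.getDefaultInstance();
}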

+ 9 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java

@@ -42,6 +42,13 @@ public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB
   /** Server side implementation to delegate the requests to */
   /** Server side implementation to delegate the requests to */
   private final JournalProtocol impl;
   private final JournalProtocol impl;
 
 
+  private final static JournalResponseProto VOID_JOURNAL_RESPONSE = 
+  JournalResponseProto.newBuilder().build();
+
+  private final static StartLogSegmentResponseProto
+  VOID_START_LOG_SEGMENT_RESPONSE =
+      StartLogSegmentResponseProto.newBuilder().build();
+
   public JournalProtocolServerSideTranslatorPB(JournalProtocol impl) {
   public JournalProtocolServerSideTranslatorPB(JournalProtocol impl) {
     this.impl = impl;
     this.impl = impl;
   }
   }
@@ -56,7 +63,7 @@ public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
-    return JournalResponseProto.newBuilder().build();
+    return VOID_JOURNAL_RESPONSE;
   }
   }
 
 
   /** @see JournalProtocol#startLogSegment */
   /** @see JournalProtocol#startLogSegment */
@@ -69,7 +76,7 @@ public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
-    return StartLogSegmentResponseProto.newBuilder().build();
+    return VOID_START_LOG_SEGMENT_RESPONSE;
   }
   }
 
 
   @Override
   @Override

+ 14 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java

@@ -63,6 +63,12 @@ public class NamenodeProtocolServerSideTranslatorPB implements
     NamenodeProtocolPB {
     NamenodeProtocolPB {
   private final NamenodeProtocol impl;
   private final NamenodeProtocol impl;
 
 
+  private final static ErrorReportResponseProto VOID_ERROR_REPORT_RESPONSE = 
+  ErrorReportResponseProto.newBuilder().build();
+
+  private final static EndCheckpointResponseProto VOID_END_CHECKPOINT_RESPONSE =
+  EndCheckpointResponseProto.newBuilder().build();
+
   public NamenodeProtocolServerSideTranslatorPB(NamenodeProtocol impl) {
   public NamenodeProtocolServerSideTranslatorPB(NamenodeProtocol impl) {
     this.impl = impl;
     this.impl = impl;
   }
   }
@@ -91,8 +97,12 @@ public class NamenodeProtocolServerSideTranslatorPB implements
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
-    return GetBlockKeysResponseProto.newBuilder()
-        .setKeys(PBHelper.convert(keys)).build();
+    GetBlockKeysResponseProto.Builder builder = 
+        GetBlockKeysResponseProto.newBuilder();
+    if (keys != null) {
+      builder.setKeys(PBHelper.convert(keys));
+    }
+    return builder.build();
   }
   }
 
 
   @Override
   @Override
@@ -143,7 +153,7 @@ public class NamenodeProtocolServerSideTranslatorPB implements
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
-    return ErrorReportResponseProto.newBuilder().build();
+    return VOID_ERROR_REPORT_RESPONSE;
   }
   }
 
 
   @Override
   @Override
@@ -181,7 +191,7 @@ public class NamenodeProtocolServerSideTranslatorPB implements
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
-    return EndCheckpointResponseProto.newBuilder().build();
+    return VOID_END_CHECKPOINT_RESPONSE;
   }
   }
 
 
   @Override
   @Override

+ 12 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
@@ -67,13 +68,13 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
   /*
   /*
    * Protobuf requests with no parameters instantiated only once
    * Protobuf requests with no parameters instantiated only once
    */
    */
-  private static final GetBlockKeysRequestProto GET_BLOCKKEYS = 
+  private static final GetBlockKeysRequestProto VOID_GET_BLOCKKEYS_REQUEST = 
       GetBlockKeysRequestProto.newBuilder().build();
       GetBlockKeysRequestProto.newBuilder().build();
-  private static final GetTransactionIdRequestProto GET_TRANSACTIONID = 
+  private static final GetTransactionIdRequestProto VOID_GET_TRANSACTIONID_REQUEST = 
       GetTransactionIdRequestProto.newBuilder().build();
       GetTransactionIdRequestProto.newBuilder().build();
-  private static final RollEditLogRequestProto ROLL_EDITLOG = 
+  private static final RollEditLogRequestProto VOID_ROLL_EDITLOG_REQUEST = 
       RollEditLogRequestProto.newBuilder().build();
       RollEditLogRequestProto.newBuilder().build();
-  private static final VersionRequestProto VERSION_REQUEST = 
+  private static final VersionRequestProto VOID_VERSION_REQUEST = 
       VersionRequestProto.newBuilder().build();
       VersionRequestProto.newBuilder().build();
 
 
   final private NamenodeProtocolPB rpcProxy;
   final private NamenodeProtocolPB rpcProxy;
@@ -104,8 +105,9 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
   @Override
   @Override
   public ExportedBlockKeys getBlockKeys() throws IOException {
   public ExportedBlockKeys getBlockKeys() throws IOException {
     try {
     try {
-      return PBHelper.convert(rpcProxy.getBlockKeys(NULL_CONTROLLER,
-          GET_BLOCKKEYS).getKeys());
+      GetBlockKeysResponseProto rsp = rpcProxy.getBlockKeys(NULL_CONTROLLER,
+          VOID_GET_BLOCKKEYS_REQUEST);
+      return rsp.hasKeys() ? PBHelper.convert(rsp.getKeys()) : null;
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -114,8 +116,8 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
   @Override
   @Override
   public long getTransactionID() throws IOException {
   public long getTransactionID() throws IOException {
     try {
     try {
-      return rpcProxy.getTransactionId(NULL_CONTROLLER, GET_TRANSACTIONID)
-          .getTxId();
+      return rpcProxy.getTransactionId(NULL_CONTROLLER,
+          VOID_GET_TRANSACTIONID_REQUEST).getTxId();
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -135,7 +137,7 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
   public CheckpointSignature rollEditLog() throws IOException {
   public CheckpointSignature rollEditLog() throws IOException {
     try {
     try {
       return PBHelper.convert(rpcProxy.rollEditLog(NULL_CONTROLLER,
       return PBHelper.convert(rpcProxy.rollEditLog(NULL_CONTROLLER,
-          ROLL_EDITLOG).getSignature());
+          VOID_ROLL_EDITLOG_REQUEST).getSignature());
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -145,7 +147,7 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
   public NamespaceInfo versionRequest() throws IOException {
   public NamespaceInfo versionRequest() throws IOException {
     try {
     try {
       return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
       return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
-          VERSION_REQUEST).getInfo());
+          VOID_VERSION_REQUEST).getInfo());
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }

+ 64 - 32
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -17,6 +17,9 @@
  */
  */
 package org.apache.hadoop.hdfs.protocolPB;
 package org.apache.hadoop.hdfs.protocolPB;
 
 
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.EnumSet;
@@ -40,10 +43,10 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
-import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
@@ -64,7 +67,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageRepor
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
@@ -128,14 +130,20 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStat
 import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.hdfs.util.ExactSizeInputStream;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DataChecksum;
 
 
+import com.google.common.collect.Lists;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedInputStream;
 
 
 /**
 /**
- * Utilities for converting protobuf classes to and from implementation classes.
+ * Utilities for converting protobuf classes to and from implementation classes
+ * and other helper utilities to help in dealing with protobuf.
  * 
  * 
  * Note that when converting from an internal type to protobuf type, the
  * Note that when converting from an internal type to protobuf type, the
  * converter never return null for protobuf type. The check for internal type
  * converter never return null for protobuf type. The check for internal type
@@ -219,7 +227,8 @@ public class PBHelper {
 
 
   // Arrays of DatanodeId
   // Arrays of DatanodeId
   public static DatanodeIDProto[] convert(DatanodeID[] did) {
   public static DatanodeIDProto[] convert(DatanodeID[] did) {
-    if (did == null) return null;
+    if (did == null)
+      return null;
     final int len = did.length;
     final int len = did.length;
     DatanodeIDProto[] result = new DatanodeIDProto[len];
     DatanodeIDProto[] result = new DatanodeIDProto[len];
     for (int i = 0; i < len; ++i) {
     for (int i = 0; i < len; ++i) {
@@ -482,14 +491,26 @@ public class PBHelper {
     }    
     }    
     return result;
     return result;
   }
   }
+
+  public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
+      DatanodeInfo[] dnInfos) {
+    return convert(dnInfos, 0);
+  }
   
   
-  static public DatanodeInfoProto[] convert(DatanodeInfo[] di) {
-    if (di == null) return null;
-    DatanodeInfoProto[] result = new DatanodeInfoProto[di.length];
-    for (int i = 0; i < di.length; i++) {
-      result[i] = PBHelper.convertDatanodeInfo(di[i]);
+  /**
+   * Copy from {@code dnInfos} to a target of list of same size starting at
+   * {@code startIdx}.
+   */
+  public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
+      DatanodeInfo[] dnInfos, int startIdx) {
+    if (dnInfos == null)
+      return null;
+    ArrayList<HdfsProtos.DatanodeInfoProto> protos = Lists
+        .newArrayListWithCapacity(dnInfos.length);
+    for (int i = startIdx; i < dnInfos.length; i++) {
+      protos.add(convert(dnInfos[i]));
     }
-    return result;
+    return protos;
   }
 
   public static DatanodeInfo[] convert(List<DatanodeInfoProto> list) {
@@ -552,8 +573,8 @@ public class PBHelper {
     return lb;
   }
 
-  public static BlockTokenIdentifierProto convert(Token<?> tok) {
-    return BlockTokenIdentifierProto.newBuilder().
+  public static TokenProto convert(Token<?> tok) {
+    return TokenProto.newBuilder().
               setIdentifier(ByteString.copyFrom(tok.getIdentifier())).
               setPassword(ByteString.copyFrom(tok.getPassword())).
               setKind(tok.getKind().toString()).
@@ -561,7 +582,7 @@ public class PBHelper {
   }
   
   public static Token<BlockTokenIdentifier> convert(
-      BlockTokenIdentifierProto blockToken) {
+      TokenProto blockToken) {
     return new Token<BlockTokenIdentifier>(blockToken.getIdentifier()
         .toByteArray(), blockToken.getPassword().toByteArray(), new Text(
         blockToken.getKind()), new Text(blockToken.getService()));
@@ -569,7 +590,7 @@
 
   
   public static Token<DelegationTokenIdentifier> convertDelegationToken(
-      BlockTokenIdentifierProto blockToken) {
+      TokenProto blockToken) {
     return new Token<DelegationTokenIdentifier>(blockToken.getIdentifier()
         .toByteArray(), blockToken.getPassword().toByteArray(), new Text(
         blockToken.getKind()), new Text(blockToken.getService()));
@@ -679,6 +700,8 @@ public class PBHelper {
     case DatanodeProtocol.DNA_SHUTDOWN:
       builder.setAction(BlockCommandProto.Action.SHUTDOWN);
       break;
+    default:
+      throw new AssertionError("Invalid action");
     }
     Block[] blocks = cmd.getBlocks();
     for (int i = 0; i < blocks.length; i++) {
@@ -692,7 +715,7 @@ public class PBHelper {
     DatanodeInfosProto[] ret = new DatanodeInfosProto[targets.length];
     for (int i = 0; i < targets.length; i++) {
       ret[i] = DatanodeInfosProto.newBuilder()
-          .addAllDatanodes(Arrays.asList(PBHelper.convert(targets[i]))).build();
+          .addAllDatanodes(PBHelper.convert(targets[i])).build();
     }
     return Arrays.asList(ret);
   }
@@ -861,25 +884,14 @@ public class PBHelper {
   // Located Block Arrays and Lists
   public static LocatedBlockProto[] convertLocatedBlock(LocatedBlock[] lb) {
     if (lb == null) return null;
-    final int len = lb.length;
-    LocatedBlockProto[] result = new LocatedBlockProto[len];
-    for (int i = 0; i < len; ++i) {
-      result[i] = PBHelper.convert(lb[i]);
-    }
-    return result;
+    return convertLocatedBlock2(Arrays.asList(lb)).toArray(
+        new LocatedBlockProto[lb.length]);
   }
   
   public static LocatedBlock[] convertLocatedBlock(LocatedBlockProto[] lb) {
     if (lb == null) return null;
-    final int len = lb.length;
-    LocatedBlock[] result = new LocatedBlock[len];
-    for (int i = 0; i < len; ++i) {
-      result[i] = new LocatedBlock(
-          PBHelper.convert(lb[i].getB()),
-          PBHelper.convert(lb[i].getLocsList()), 
-          lb[i].getOffset(), lb[i].getCorrupt());
-    }
-    return result;
+    return convertLocatedBlock(Arrays.asList(lb)).toArray(
+        new LocatedBlock[lb.length]);
   }
   
   public static List<LocatedBlock> convertLocatedBlock(
@@ -961,7 +973,7 @@ public class PBHelper {
         fs.getFileBufferSize(),
         fs.getEncryptDataTransfer(),
         fs.getTrashInterval(),
-        HdfsProtoUtil.fromProto(fs.getChecksumType()));
+        PBHelper.convert(fs.getChecksumType()));
   }
   
   public static FsServerDefaultsProto convert(FsServerDefaults fs) {
@@ -974,7 +986,7 @@ public class PBHelper {
       .setFileBufferSize(fs.getFileBufferSize())
       .setEncryptDataTransfer(fs.getEncryptDataTransfer())
       .setTrashInterval(fs.getTrashInterval())
-      .setChecksumType(HdfsProtoUtil.toProto(fs.getChecksumType()))
+      .setChecksumType(PBHelper.convert(fs.getChecksumType()))
       .build();
   }
   
@@ -1312,4 +1324,24 @@ public class PBHelper {
         .setLayoutVersion(j.getLayoutVersion())
         .setNamespaceID(j.getNamespaceId()).build();
   }
+
+  public static DataChecksum.Type convert(HdfsProtos.ChecksumTypeProto type) {
+    return DataChecksum.Type.valueOf(type.getNumber());
+  }
+
+  public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
+    return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
+  }
+
+  public static InputStream vintPrefixed(final InputStream input)
+      throws IOException {
+    final int firstByte = input.read();
+    if (firstByte == -1) {
+      throw new EOFException("Premature EOF: no length prefix available");
+    }
+
+    int size = CodedInputStream.readRawVarint32(firstByte, input);
+    assert size >= 0;
+    return new ExactSizeInputStream(input, size);
+  }
 }

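A note on the new vintPrefixed() helper above (moved here from HdfsProtoUtil): it reads a varint length prefix and wraps the stream in an ExactSizeInputStream so a protobuf parser cannot read past the message boundary. A minimal caller-side sketch, assuming a data-transfer InputStream and the BlockOpResponseProto message from DataTransferProtos; the surrounding class is illustrative only, not part of this change:

    import java.io.IOException;
    import java.io.InputStream;

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;

    class VintPrefixedReadSketch {
      // Parses exactly one length-prefixed response; trailing bytes on the
      // stream stay untouched because the parser is bounded to the message size.
      static BlockOpResponseProto readResponse(InputStream in) throws IOException {
        return BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
      }
    }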
+ 6 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java

@@ -38,6 +38,10 @@ public class RefreshAuthorizationPolicyProtocolClientSideTranslatorPB implements
   private final static RpcController NULL_CONTROLLER = null;
   private final RefreshAuthorizationPolicyProtocolPB rpcProxy;
   
+  private final static RefreshServiceAclRequestProto
+  VOID_REFRESH_SERVICE_ACL_REQUEST =
+      RefreshServiceAclRequestProto.newBuilder().build();
+
   public RefreshAuthorizationPolicyProtocolClientSideTranslatorPB(
       RefreshAuthorizationPolicyProtocolPB rpcProxy) {
     this.rpcProxy = rpcProxy;
@@ -50,10 +54,9 @@ public class RefreshAuthorizationPolicyProtocolClientSideTranslatorPB implements
 
   @Override
   public void refreshServiceAcl() throws IOException {
-    RefreshServiceAclRequestProto request = RefreshServiceAclRequestProto
-        .newBuilder().build();
     try {
-      rpcProxy.refreshServiceAcl(NULL_CONTROLLER, request);
+      rpcProxy.refreshServiceAcl(NULL_CONTROLLER,
+          VOID_REFRESH_SERVICE_ACL_REQUEST);
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
     }

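The VOID_REFRESH_SERVICE_ACL_REQUEST constant above, and the similar constants in the translators that follow, rely on generated protobuf messages being immutable: an empty request or response can be built once and shared by every RPC instead of calling newBuilder().build() per invocation. A generic sketch of the pattern; FooProtos, FooProtocolPB and their methods are hypothetical placeholders, not actual Hadoop classes:

    public class FooTranslator {
      // Built once; any generated proto message with no fields works the same way.
      private static final FooProtos.VoidRequestProto VOID_REQUEST =
          FooProtos.VoidRequestProto.newBuilder().build();

      public void callFoo(FooProtocolPB rpcProxy) throws com.google.protobuf.ServiceException {
        // The shared instance is immutable, so reuse across threads is safe.
        rpcProxy.foo(null, VOID_REQUEST);
      }
    }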
+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java

@@ -32,6 +32,10 @@ public class RefreshAuthorizationPolicyProtocolServerSideTranslatorPB implements
 
   private final RefreshAuthorizationPolicyProtocol impl;
 
+  private final static RefreshServiceAclResponseProto
+  VOID_REFRESH_SERVICE_ACL_RESPONSE = RefreshServiceAclResponseProto
+      .newBuilder().build();
+
   public RefreshAuthorizationPolicyProtocolServerSideTranslatorPB(
       RefreshAuthorizationPolicyProtocol impl) {
     this.impl = impl;
@@ -46,6 +50,6 @@ public class RefreshAuthorizationPolicyProtocolServerSideTranslatorPB implements
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return RefreshServiceAclResponseProto.newBuilder().build();
+    return VOID_REFRESH_SERVICE_ACL_RESPONSE;
   }
 }

+ 12 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java

@@ -39,6 +39,14 @@ public class RefreshUserMappingsProtocolClientSideTranslatorPB implements
   private final static RpcController NULL_CONTROLLER = null;
   private final RefreshUserMappingsProtocolPB rpcProxy;
   
+  private final static RefreshUserToGroupsMappingsRequestProto 
+  VOID_REFRESH_USER_TO_GROUPS_MAPPING_REQUEST = 
+      RefreshUserToGroupsMappingsRequestProto.newBuilder().build();
+
+  private final static RefreshSuperUserGroupsConfigurationRequestProto
+  VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_REQUEST = 
+      RefreshSuperUserGroupsConfigurationRequestProto.newBuilder().build();
+
   public RefreshUserMappingsProtocolClientSideTranslatorPB(
       RefreshUserMappingsProtocolPB rpcProxy) {
     this.rpcProxy = rpcProxy;
@@ -51,10 +59,9 @@ public class RefreshUserMappingsProtocolClientSideTranslatorPB implements
 
   @Override
   public void refreshUserToGroupsMappings() throws IOException {
-    RefreshUserToGroupsMappingsRequestProto request = 
-        RefreshUserToGroupsMappingsRequestProto.newBuilder().build();
     try {
-      rpcProxy.refreshUserToGroupsMappings(NULL_CONTROLLER, request);
+      rpcProxy.refreshUserToGroupsMappings(NULL_CONTROLLER,
+          VOID_REFRESH_USER_TO_GROUPS_MAPPING_REQUEST);
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
     }
@@ -62,10 +69,9 @@ public class RefreshUserMappingsProtocolClientSideTranslatorPB implements
 
   @Override
   public void refreshSuperUserGroupsConfiguration() throws IOException {
-    RefreshSuperUserGroupsConfigurationRequestProto request = 
-        RefreshSuperUserGroupsConfigurationRequestProto.newBuilder().build();
     try {
-      rpcProxy.refreshSuperUserGroupsConfiguration(NULL_CONTROLLER, request);
+      rpcProxy.refreshSuperUserGroupsConfiguration(NULL_CONTROLLER,
+          VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_REQUEST);
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
     }

+ 11 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java

@@ -33,6 +33,15 @@ public class RefreshUserMappingsProtocolServerSideTranslatorPB implements Refres
 
   private final RefreshUserMappingsProtocol impl;
   
+  private final static RefreshUserToGroupsMappingsResponseProto 
+  VOID_REFRESH_USER_GROUPS_MAPPING_RESPONSE =
+      RefreshUserToGroupsMappingsResponseProto.newBuilder().build();
+
+  private final static RefreshSuperUserGroupsConfigurationResponseProto
+  VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_RESPONSE = 
+      RefreshSuperUserGroupsConfigurationResponseProto.newBuilder()
+      .build();
+
   public RefreshUserMappingsProtocolServerSideTranslatorPB(RefreshUserMappingsProtocol impl) {
     this.impl = impl;
   }
@@ -47,7 +56,7 @@ public class RefreshUserMappingsProtocolServerSideTranslatorPB implements Refres
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return RefreshUserToGroupsMappingsResponseProto.newBuilder().build();
+    return VOID_REFRESH_USER_GROUPS_MAPPING_RESPONSE;
   }
 
   @Override
@@ -60,7 +69,6 @@ public class RefreshUserMappingsProtocolServerSideTranslatorPB implements Refres
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return RefreshSuperUserGroupsConfigurationResponseProto.newBuilder()
-        .build();
+    return VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_RESPONSE;
   }
 }

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolProtocolBuffers/overview.html


+ 9 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java

@@ -65,6 +65,13 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
   /** Server side implementation to delegate the requests to */
   private final QJournalProtocol impl;
 
+  private final static JournalResponseProto VOID_JOURNAL_RESPONSE =
+  JournalResponseProto.newBuilder().build();
+
+  private final static StartLogSegmentResponseProto
+  VOID_START_LOG_SEGMENT_RESPONSE =
+      StartLogSegmentResponseProto.newBuilder().build();
+
   public QJournalProtocolServerSideTranslatorPB(QJournalProtocol impl) {
     this.impl = impl;
   }
@@ -135,7 +142,7 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return JournalResponseProto.newBuilder().build();
+    return VOID_JOURNAL_RESPONSE;
   }
 
   /** @see JournalProtocol#heartbeat */
@@ -160,7 +167,7 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return StartLogSegmentResponseProto.newBuilder().build();
+    return VOID_START_LOG_SEGMENT_RESPONSE;
   }
   
   @Override

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java

@@ -58,6 +58,7 @@ import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.security.SecurityUtil;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Charsets;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Stopwatch;
 import com.google.common.collect.ImmutableList;
@@ -934,7 +935,7 @@ class Journal implements Closeable {
       fos.write('\n');
       // Write human-readable data after the protobuf. This is only
       // to assist in debugging -- it's not parsed at all.
-      OutputStreamWriter writer = new OutputStreamWriter(fos);
+      OutputStreamWriter writer = new OutputStreamWriter(fos, Charsets.UTF_8);
       
       writer.write(String.valueOf(newData));
       writer.write('\n');

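The Journal change above matters because new OutputStreamWriter(fos) picks up the JVM default charset, which can differ from host to host; passing Charsets.UTF_8 makes the human-readable trailer byte-for-byte identical everywhere. A small standalone illustration of the same idiom (the sample string is arbitrary):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStreamWriter;

    import com.google.common.base.Charsets;

    public class Utf8WriterSketch {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // Explicit charset: the bytes produced do not depend on -Dfile.encoding.
        OutputStreamWriter writer = new OutputStreamWriter(out, Charsets.UTF_8);
        writer.write("committed txid: 42\n");
        writer.close();
        System.out.println(out.size() + " bytes written");
      }
    }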
+ 20 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -18,7 +18,8 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import static com.google.common.base.Preconditions.checkArgument;
-import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
+
+import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
 
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
@@ -189,6 +190,7 @@ public class Balancer {
    * balancing purpose at a datanode
    */
   public static final int MAX_NUM_CONCURRENT_MOVES = 5;
+  private static final int MAX_NO_PENDING_BLOCK_ITERATIONS = 5;
   
   private static final String USAGE = "Usage: java "
       + Balancer.class.getSimpleName()
@@ -224,7 +226,6 @@ public class Balancer {
                  = new HashMap<String, BalancerDatanode>();
   
   private NetworkTopology cluster;
-  
   final static private int MOVER_THREAD_POOL_SIZE = 1000;
   final private ExecutorService moverExecutor = 
     Executors.newFixedThreadPool(MOVER_THREAD_POOL_SIZE);
@@ -752,6 +753,7 @@ public class Balancer {
       long startTime = Time.now();
       this.blocksToReceive = 2*scheduledSize;
       boolean isTimeUp = false;
+      int noPendingBlockIteration = 0;
       while(!isTimeUp && scheduledSize>0 &&
           (!srcBlockList.isEmpty() || blocksToReceive>0)) {
         PendingBlockMove pendingBlock = chooseNextBlockToMove();
@@ -775,7 +777,15 @@ public class Balancer {
             LOG.warn("Exception while getting block list", e);
             return;
           }
-        } 
+        } else {
+          // source node cannot find a pendingBlockToMove, iteration +1
+          noPendingBlockIteration++;
+          // in case no blocks can be moved for source node's task,
+          // jump out of while-loop after 5 iterations.
+          if (noPendingBlockIteration >= MAX_NO_PENDING_BLOCK_ITERATIONS) {
+            scheduledSize = 0;
+          }
+        }
         
         // check if time is up or not
         if (Time.now()-startTime > MAX_ITERATION_TIME) {
@@ -801,8 +811,8 @@ public class Balancer {
    */
   private static void checkReplicationPolicyCompatibility(Configuration conf
       ) throws UnsupportedActionException {
-    if (BlockPlacementPolicy.getInstance(conf, null, null) instanceof 
-        BlockPlacementPolicyDefault) {
+    if (!(BlockPlacementPolicy.getInstance(conf, null, null) instanceof 
+        BlockPlacementPolicyDefault)) {
       throw new UnsupportedActionException(
           "Balancer without BlockPlacementPolicyDefault");
     }
@@ -1085,7 +1095,6 @@ public class Balancer {
     }
   };
   private BytesMoved bytesMoved = new BytesMoved();
-  private int notChangedIterations = 0;
   
   /* Start a thread to dispatch block moves for each source. 
    * The thread selects blocks to move & sends request to proxy source to
@@ -1370,7 +1379,7 @@
             " in this iteration");
       }
 
-      formatter.format("%-24s %10d  %19s  %18s  %17s\n", 
+      formatter.format("%-24s %10d  %19s  %18s  %17s%n",
           DateFormat.getDateTimeInstance().format(new Date()),
           iteration,
           StringUtils.byteDesc(bytesMoved.get()),
@@ -1384,19 +1393,10 @@ public class Balancer {
        * available to move.
        * Exit no byte has been moved for 5 consecutive iterations.
        */
-      if (dispatchBlockMoves() > 0) {
-        notChangedIterations = 0;
-      } else {
-        notChangedIterations++;
-        if (notChangedIterations >= 5) {
-          System.out.println(
-              "No block has been moved for 5 iterations. Exiting...");
-          return ReturnStatus.NO_MOVE_PROGRESS;
-        }
+      if (!this.nnc.shouldContinue(dispatchBlockMoves())) {
+        return ReturnStatus.NO_MOVE_PROGRESS;
       }
 
-      // clean all lists
-      resetData(conf);
       return ReturnStatus.IN_PROGRESS;
     } catch (IllegalArgumentException e) {
       System.out.println(e + ".  Exiting ...");
@@ -1445,6 +1445,8 @@ public class Balancer {
         for(NameNodeConnector nnc : connectors) {
           final Balancer b = new Balancer(nnc, p, conf);
           final ReturnStatus r = b.run(iteration, formatter, conf);
+          // clean all lists
+          b.resetData(conf);
           if (r == ReturnStatus.IN_PROGRESS) {
             done = false;
           } else if (r != ReturnStatus.SUCCESS) {

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java

@@ -52,6 +52,7 @@ import org.apache.hadoop.util.Daemon;
 class NameNodeConnector {
   private static final Log LOG = Balancer.LOG;
   private static final Path BALANCER_ID_PATH = new Path("/system/balancer.id");
+  private static final int MAX_NOT_CHANGED_ITERATIONS = 5;
 
   final URI nameNodeUri;
   final String blockpoolID;
@@ -65,6 +66,8 @@ class NameNodeConnector {
   private final boolean encryptDataTransfer;
   private boolean shouldRun;
   private long keyUpdaterInterval;
+  // used for balancer
+  private int notChangedIterations = 0;
   private BlockTokenSecretManager blockTokenSecretManager;
   private Daemon keyupdaterthread; // AccessKeyUpdater thread
   private DataEncryptionKey encryptionKey;
@@ -119,6 +122,20 @@ class NameNodeConnector {
     }
   }
 
+  boolean shouldContinue(long dispatchBlockMoveBytes) {
+    if (dispatchBlockMoveBytes > 0) {
+      notChangedIterations = 0;
+    } else {
+      notChangedIterations++;
+      if (notChangedIterations >= MAX_NOT_CHANGED_ITERATIONS) {
+        System.out.println("No block has been moved for "
+            + notChangedIterations + " iterations. Exiting...");
+        return false;
+      }
+    }
+    return true;
+  }
+  
   /** Get an access token for a block. */
   Token<BlockTokenIdentifier> getAccessToken(ExtendedBlock eb
       ) throws IOException {

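shouldContinue() above moves the no-progress counter out of Balancer and into the per-namenode connector, so in a federated cluster each namenode gets its own five-iteration allowance. A standalone sketch of the same exit rule; the names are illustrative, and the real method also prints a message before giving up:

    public class NoProgressTracker {
      private static final int MAX_NOT_CHANGED_ITERATIONS = 5;
      private int notChangedIterations = 0;

      boolean shouldContinue(long bytesMovedThisIteration) {
        if (bytesMovedThisIteration > 0) {
          notChangedIterations = 0;        // progress made: reset the counter
          return true;
        }
        notChangedIterations++;            // another iteration with no movement
        return notChangedIterations < MAX_NOT_CHANGED_ITERATIONS;
      }

      public static void main(String[] args) {
        NoProgressTracker t = new NoProgressTracker();
        for (long moved : new long[] {1024, 0, 0, 0, 0, 0}) {
          System.out.println("moved=" + moved + " continue=" + t.shouldContinue(moved));
        }
      }
    }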
+ 73 - 33
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -33,6 +33,7 @@ import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -107,8 +108,8 @@ public class BlockManager {
   private volatile long corruptReplicaBlocksCount = 0L;
   private volatile long underReplicatedBlocksCount = 0L;
   private volatile long scheduledReplicationBlocksCount = 0L;
-  private volatile long excessBlocksCount = 0L;
-  private volatile long postponedMisreplicatedBlocksCount = 0L;
+  private AtomicLong excessBlocksCount = new AtomicLong(0L);
+  private AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L);
   
   /** Used by metrics */
   public long getPendingReplicationBlocksCount() {
@@ -132,11 +133,11 @@ public class BlockManager {
   }
   /** Used by metrics */
   public long getExcessBlocksCount() {
-    return excessBlocksCount;
+    return excessBlocksCount.get();
   }
   /** Used by metrics */
   public long getPostponedMisreplicatedBlocksCount() {
-    return postponedMisreplicatedBlocksCount;
+    return postponedMisreplicatedBlocksCount.get();
   }
   /** Used by metrics */
   public int getPendingDataNodeMessageCount() {
@@ -170,29 +171,34 @@ public class BlockManager {
    */
   private final Set<Block> postponedMisreplicatedBlocks = Sets.newHashSet();
 
-  //
-  // Keeps a TreeSet for every named node. Each treeset contains
-  // a list of the blocks that are "extra" at that location. We'll
-  // eventually remove these extras.
-  // Mapping: StorageID -> TreeSet<Block>
-  //
+  /**
+   * Maps a StorageID to the set of blocks that are "extra" for this
+   * DataNode. We'll eventually remove these extras.
+   */
   public final Map<String, LightWeightLinkedSet<Block>> excessReplicateMap =
     new TreeMap<String, LightWeightLinkedSet<Block>>();
 
-  //
-  // Store set of Blocks that need to be replicated 1 or more times.
-  // We also store pending replication-orders.
-  //
+  /**
+   * Store set of Blocks that need to be replicated 1 or more times.
+   * We also store pending replication-orders.
+   */
   public final UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
+
   @VisibleForTesting
   final PendingReplicationBlocks pendingReplications;
 
   /** The maximum number of replicas allowed for a block */
   public final short maxReplication;
-  /** The maximum number of outgoing replication streams
-   *  a given node should have at one time 
-   */
+  /**
+   * The maximum number of outgoing replication streams a given node should have
+   * at one time considering all but the highest priority replications needed.
+    */
   int maxReplicationStreams;
+  /**
+   * The maximum number of outgoing replication streams a given node should have
+   * at one time.
+   */
+  int replicationStreamsHardLimit;
   /** Minimum copies needed or else write is disallowed */
   public final short minReplication;
   /** Default number of replicas */
@@ -263,9 +269,16 @@ public class BlockManager {
     this.minReplication = (short)minR;
     this.maxReplication = (short)maxR;
 
-    this.maxReplicationStreams = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
-                                             DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
-    this.shouldCheckForEnoughRacks = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) != null;
+    this.maxReplicationStreams =
+        conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
+            DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
+    this.replicationStreamsHardLimit =
+        conf.getInt(
+            DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
+            DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
+    this.shouldCheckForEnoughRacks =
+        conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null
+            ? false : true;
 
     this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
     this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
@@ -435,7 +448,8 @@ public class BlockManager {
     NumberReplicas numReplicas = new NumberReplicas();
     // source node returned is not used
     chooseSourceDatanode(block, containingNodes,
-        containingLiveReplicasNodes, numReplicas);
+        containingLiveReplicasNodes, numReplicas,
+        UnderReplicatedBlocks.LEVEL);
     assert containingLiveReplicasNodes.size() == numReplicas.liveReplicas();
     int usableReplicas = numReplicas.liveReplicas() +
                          numReplicas.decommissionedReplicas();
@@ -1052,7 +1066,7 @@
 
   private void postponeBlock(Block blk) {
     if (postponedMisreplicatedBlocks.add(blk)) {
-      postponedMisreplicatedBlocksCount++;
+      postponedMisreplicatedBlocksCount.incrementAndGet();
     }
   }
   
@@ -1145,11 +1159,12 @@ public class BlockManager {
             liveReplicaNodes = new ArrayList<DatanodeDescriptor>();
             NumberReplicas numReplicas = new NumberReplicas();
             srcNode = chooseSourceDatanode(
-                block, containingNodes, liveReplicaNodes, numReplicas);
+                block, containingNodes, liveReplicaNodes, numReplicas,
+                priority);
             if(srcNode == null) { // block can not be replicated from any node
               LOG.debug("Block " + block + " cannot be repl from any node");
               continue;
-          }
+            }
 
             assert liveReplicaNodes.size() == numReplicas.liveReplicas();
             // do not schedule more if enough replicas is already pending
@@ -1339,16 +1354,34 @@ public class BlockManager {
    * since the former do not have write traffic and hence are less busy.
    * We do not use already decommissioned nodes as a source.
    * Otherwise we choose a random node among those that did not reach their
-   * replication limit.
+   * replication limits.  However, if the replication is of the highest priority
+   * and all nodes have reached their replication limits, we will choose a
+   * random node despite the replication limit.
    *
    * In addition form a list of all nodes containing the block
    * and calculate its replication numbers.
+   *
+   * @param block Block for which a replication source is needed
+   * @param containingNodes List to be populated with nodes found to contain the 
+   *                        given block
+   * @param nodesContainingLiveReplicas List to be populated with nodes found to
+   *                                    contain live replicas of the given block
+   * @param numReplicas NumberReplicas instance to be initialized with the 
+   *                                   counts of live, corrupt, excess, and
+   *                                   decommissioned replicas of the given
+   *                                   block.
+   * @param priority integer representing replication priority of the given
+   *                 block
+   * @return the DatanodeDescriptor of the chosen node from which to replicate
+   *         the given block
    */
    */
-  private DatanodeDescriptor chooseSourceDatanode(
+   @VisibleForTesting
+   DatanodeDescriptor chooseSourceDatanode(
                                     Block block,
                                     Block block,
                                     List<DatanodeDescriptor> containingNodes,
                                     List<DatanodeDescriptor> containingNodes,
                                     List<DatanodeDescriptor> nodesContainingLiveReplicas,
                                     List<DatanodeDescriptor> nodesContainingLiveReplicas,
-                                    NumberReplicas numReplicas) {
+                                    NumberReplicas numReplicas,
+                                    int priority) {
     containingNodes.clear();
     containingNodes.clear();
     nodesContainingLiveReplicas.clear();
     nodesContainingLiveReplicas.clear();
     DatanodeDescriptor srcNode = null;
     DatanodeDescriptor srcNode = null;
@@ -1377,8 +1410,15 @@ public class BlockManager {
       // If so, do not select the node as src node
       // If so, do not select the node as src node
       if ((nodesCorrupt != null) && nodesCorrupt.contains(node))
         continue;
+      if(priority != UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY
+          && node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams)
+      {
         continue; // already reached replication limit
         continue; // already reached replication limit
+      }
+      if (node.getNumberOfBlocksToBeReplicated() >= replicationStreamsHardLimit)
+      {
+        continue;
+      }
       // the block must not be scheduled for removal on srcNode
       // the block must not be scheduled for removal on srcNode
       if(excessBlocks != null && excessBlocks.contains(block))
         continue;
               "in block map.");
               "in block map.");
         }
         }
         it.remove();
         it.remove();
-        postponedMisreplicatedBlocksCount--;
+        postponedMisreplicatedBlocksCount.decrementAndGet();
         continue;
         continue;
       }
       }
       MisReplicationResult res = processMisReplicatedBlock(bi);
       MisReplicationResult res = processMisReplicatedBlock(bi);
@@ -1568,7 +1608,7 @@ public class BlockManager {
       }
       }
       if (res != MisReplicationResult.POSTPONE) {
         it.remove();
-        postponedMisreplicatedBlocksCount--;
+        postponedMisreplicatedBlocksCount.decrementAndGet();
       }
     }
   }
       excessReplicateMap.put(dn.getStorageID(), excessBlocks);
       excessReplicateMap.put(dn.getStorageID(), excessBlocks);
     }
     if (excessBlocks.add(block)) {
-      excessBlocksCount++;
+      excessBlocksCount.incrementAndGet();
       if(blockLog.isDebugEnabled()) {
         blockLog.debug("BLOCK* addToExcessReplicate:"
             + " (" + dn + ", " + block
           .getStorageID());
           .getStorageID());
       if (excessBlocks != null) {
         if (excessBlocks.remove(block)) {
-          excessBlocksCount--;
+          excessBlocksCount.decrementAndGet();
           if(blockLog.isDebugEnabled()) {
             blockLog.debug("BLOCK* removeStoredBlock: "
                 + block + " is removed from excessBlocks");
     // Remove the block from pendingReplications
     // Remove the block from pendingReplications
     pendingReplications.remove(block);
     if (postponedMisreplicatedBlocks.remove(block)) {
-      postponedMisreplicatedBlocksCount--;
+      postponedMisreplicatedBlocksCount.decrementAndGet();
     }
   }
 
+ 10 - 5
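The BlockManager counter change above is about atomicity, not just visibility: excessBlocksCount++ on a volatile long is a separate read, add and write, so two threads can lose an update, while AtomicLong.incrementAndGet() performs the whole read-modify-write atomically. A self-contained illustration:

    import java.util.concurrent.atomic.AtomicLong;

    public class AtomicCounterSketch {
      private static final AtomicLong excessBlocksCount = new AtomicLong(0L);

      public static void main(String[] args) throws InterruptedException {
        Runnable add = () -> {
          for (int i = 0; i < 100000; i++) {
            excessBlocksCount.incrementAndGet();   // atomic read-modify-write
          }
        };
        Thread t1 = new Thread(add);
        Thread t2 = new Thread(add);
        t1.start(); t2.start();
        t1.join(); t2.join();
        System.out.println(excessBlocksCount.get());   // always 200000
      }
    }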
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -236,13 +236,18 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                + totalReplicasExpected + "\n"
                + totalReplicasExpected + "\n"
                + e.getMessage());
       if (avoidStaleNodes) {
-        // excludedNodes now has - initial excludedNodes, any nodes that were
-        // chosen and nodes that were tried but were not chosen because they
-        // were stale, decommissioned or for any other reason a node is not
-        // chosen for write. Retry again now not avoiding stale node
+        // Retry chooseTarget again, this time not avoiding stale nodes.
+
+        // excludedNodes contains the initial excludedNodes and nodes that were
+        // not chosen because they were stale, decommissioned, etc.
+        // We need to additionally exclude the nodes that were added to the 
+        // result list in the successful calls to choose*() above.
         for (Node node : results) {
           oldExcludedNodes.put(node, node);
         }
+        // Set numOfReplicas, since it can get out of sync with the result list
+        // if the NotEnoughReplicasException was thrown in chooseRandom().
+        numOfReplicas = totalReplicasExpected - results.size();
         return chooseTarget(numOfReplicas, writer, oldExcludedNodes, blocksize,
             maxNodesPerRack, results, false);
       }
@@ -542,7 +547,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         if (LOG.isDebugEnabled()) {
           threadLocalBuilder.get().append(node.toString()).append(": ")
               .append("Node ").append(NodeBase.getPath(node))
-              .append(" is not chosen because the node is staled ");
+              .append(" is not chosen because the node is stale ");
         }
         return false;
       }

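The key line in the retry branch above is the recomputation of numOfReplicas: targets already placed into results before the NotEnoughReplicasException must be subtracted, otherwise the retry without the stale-node restriction would ask for too many nodes. A toy illustration of that bookkeeping, outside of HDFS:

    import java.util.ArrayList;
    import java.util.List;

    public class RetryCountSketch {
      public static void main(String[] args) {
        int totalReplicasExpected = 3;
        List<String> results = new ArrayList<String>();
        results.add("datanode-1");   // chosen before the exception was thrown
        // Ask only for what is still missing on the retry.
        int numOfReplicas = totalReplicasExpected - results.size();
        System.out.println("retry requests " + numOfReplicas + " more target(s)");
      }
    }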
+ 7 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java

@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.web.resources.DelegationParam;
 import org.apache.hadoop.hdfs.web.resources.DoAsParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.http.HtmlQuoting;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
@@ -69,6 +70,8 @@ import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.VersionInfo;
 
+import com.google.common.base.Charsets;
+
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
 
@@ -178,7 +181,7 @@ public class JspHelper {
         s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
       } catch (IOException e) {
         deadNodes.add(chosenNode);
-        s.close();
+        IOUtils.closeSocket(s);
         s = null;
         failures++;
       }
@@ -228,7 +231,7 @@ public class JspHelper {
     }
     blockReader = null;
     s.close();
-    out.print(HtmlQuoting.quoteHtmlChars(new String(buf)));
+    out.print(HtmlQuoting.quoteHtmlChars(new String(buf, Charsets.UTF_8)));
   }
 
   public static void addTableHeader(JspWriter out) throws IOException {
@@ -382,6 +385,8 @@ public class JspHelper {
           int dint = d1.getVolumeFailures() - d2.getVolumeFailures();
           ret = (dint < 0) ? -1 : ((dint > 0) ? 1 : 0);
           break;
+        default:
+          throw new IllegalArgumentException("Invalid sortField");
         }
         return (sortOrder == SORT_ORDER_DSC) ? -ret : ret;
       }

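IOUtils.closeSocket() used above is Hadoop's quiet-close helper: inside a catch block, a second exception from s.close() would otherwise hide the original failure. A hedged sketch of the same pattern with a plain java.net.Socket; the address and timeout are arbitrary:

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    import org.apache.hadoop.io.IOUtils;

    public class QuietCloseSketch {
      public static void main(String[] args) {
        Socket s = new Socket();
        try {
          s.connect(new InetSocketAddress("127.0.0.1", 1), 1000);
        } catch (IOException e) {
          // closeSocket() swallows any secondary failure, so the connect
          // exception stays the one that gets reported.
          IOUtils.closeSocket(s);
          System.err.println("connect failed: " + e);
        }
      }
    }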
+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java

@@ -44,6 +44,8 @@ import org.apache.hadoop.util.VersionInfo;
 
 import com.google.common.base.Preconditions;
 
+import com.google.common.base.Charsets;
+
 
 
 /**
@@ -658,7 +660,7 @@ public abstract class Storage extends StorageInfo {
       FileLock res = null;
       try {
         res = file.getChannel().tryLock();
-        file.write(jvmName.getBytes());
+        file.write(jvmName.getBytes(Charsets.UTF_8));
         LOG.info("Lock on " + lockF + " acquired by nodename " + jvmName);
       } catch(OverlappingFileLockException oe) {
         LOG.error("It appears that another namenode " + file.readLine() 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java

@@ -395,7 +395,7 @@ class BPOfferService {
   }
 
   @VisibleForTesting
-  synchronized List<BPServiceActor> getBPServiceActors() {
+  List<BPServiceActor> getBPServiceActors() {
     return Lists.newArrayList(bpServices);
   }
   

+ 18 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java

@@ -388,8 +388,8 @@ class BlockPoolSliceScanner {
       try {
         adjustThrottler();
         
-        blockSender = new BlockSender(block, 0, -1, false, true, datanode,
-            null);
+        blockSender = new BlockSender(block, 0, -1, false, true, true, 
+            datanode, null);
 
         DataOutputStream out = 
                 new DataOutputStream(new IOUtils.NullOutputStream());
@@ -703,7 +703,7 @@ class BlockPoolSliceScanner {
           (info.lastScanType == ScanType.VERIFICATION_SCAN) ? "local" : "none"; 
         buffer.append(String.format("%-26s : status : %-6s type : %-6s" +
                                     " scan time : " +
-                                    "%-15d %s\n", info.block, 
+                                    "%-15d %s%n", info.block, 
                                     (info.lastScanOk ? "ok" : "failed"),
                                     scanType, scanTime,
                                     (scanTime <= 0) ? "not yet verified" : 
@@ -716,21 +716,21 @@ class BlockPoolSliceScanner {
     double pctProgress = (totalBytesToScan == 0) ? 100 :
                          (totalBytesToScan-bytesLeft)*100.0/totalBytesToScan;
                          
-    buffer.append(String.format("\nTotal Blocks                 : %6d" +
-                                "\nVerified in last hour        : %6d" +
-                                "\nVerified in last day         : %6d" +
-                                "\nVerified in last week        : %6d" +
-                                "\nVerified in last four weeks  : %6d" +
-                                "\nVerified in SCAN_PERIOD      : %6d" +
-                                "\nNot yet verified             : %6d" +
-                                "\nVerified since restart       : %6d" +
-                                "\nScans since restart          : %6d" +
-                                "\nScan errors since restart    : %6d" +
-                                "\nTransient scan errors        : %6d" +
-                                "\nCurrent scan rate limit KBps : %6d" +
-                                "\nProgress this period         : %6.0f%%" +
-                                "\nTime left in cur period      : %6.2f%%" +
-                                "\n", 
+    buffer.append(String.format("%nTotal Blocks                 : %6d" +
+                                "%nVerified in last hour        : %6d" +
+                                "%nVerified in last day         : %6d" +
+                                "%nVerified in last week        : %6d" +
+                                "%nVerified in last four weeks  : %6d" +
+                                "%nVerified in SCAN_PERIOD      : %6d" +
+                                "%nNot yet verified             : %6d" +
+                                "%nVerified since restart       : %6d" +
+                                "%nScans since restart          : %6d" +
+                                "%nScan errors since restart    : %6d" +
+                                "%nTransient scan errors        : %6d" +
+                                "%nCurrent scan rate limit KBps : %6d" +
+                                "%nProgress this period         : %6.0f%%" +
+                                "%nTime left in cur period      : %6.2f%%" +
+                                "%n", 
                                 total, inOneHour, inOneDay, inOneWeek,
                                 inFourWeeks, inScanPeriod, neverScanned,
                                 totalScans, totalScans, 

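The \n-to-%n switch in these format strings (and in the Balancer report above) makes java.util.Formatter emit the platform line separator instead of a hard-coded LF, so the scanner report follows the host platform's newline convention. Minimal illustration:

    public class NewlineFormatSketch {
      public static void main(String[] args) {
        // %n expands to System.lineSeparator(); "\n" is always a bare LF.
        System.out.printf("Total Blocks                 : %6d%n", 42);
        System.out.printf("Not yet verified             : %6d%n", 7);
      }
    }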
+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java

@@ -78,6 +78,10 @@ public class BlockPoolSliceStorage extends Storage {
     this.clusterID = clusterId;
   }
 
+  private BlockPoolSliceStorage() {
+    super(NodeType.DATA_NODE);
+  }
+
   /**
    * Analyze storage directories. Recover from previous transitions if required.
    * 
@@ -378,7 +382,7 @@ public class BlockPoolSliceStorage extends Storage {
     if (!prevDir.exists())
       return;
     // read attributes out of the VERSION file of previous directory
-    DataStorage prevInfo = new DataStorage();
+    BlockPoolSliceStorage prevInfo = new BlockPoolSliceStorage();
     prevInfo.readPreviousVersionProperties(bpSd);
 
     // We allow rollback to a state, which is either consistent with

+ 54 - 32
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

@@ -45,6 +45,8 @@ import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.net.SocketOutputStream;
 import org.apache.hadoop.util.DataChecksum;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Reads a block from the disk and sends it to a recipient.
  * 
@@ -158,12 +160,14 @@ class BlockSender implements java.io.Closeable {
    * @param length length of data to read
    * @param corruptChecksumOk
    * @param verifyChecksum verify checksum while reading the data
+   * @param sendChecksum send checksum to client.
    * @param datanode datanode from which the block is being read
    * @param clientTraceFmt format string used to print client trace logs
    * @throws IOException
    */
   BlockSender(ExtendedBlock block, long startOffset, long length,
               boolean corruptChecksumOk, boolean verifyChecksum,
+              boolean sendChecksum,
               DataNode datanode, String clientTraceFmt)
       throws IOException {
     try {
@@ -175,6 +179,13 @@ class BlockSender implements java.io.Closeable {
       this.shouldDropCacheBehindRead = datanode.getDnConf().dropCacheBehindReads;
       this.datanode = datanode;
       
+      if (verifyChecksum) {
+        // To simplify implementation, callers may not specify verification
+        // without sending.
+        Preconditions.checkArgument(sendChecksum,
+            "If verifying checksum, currently must also send it.");
+      }
+      
       final Replica replica;
       final Replica replica;
       final long replicaVisibleLength;
       final long replicaVisibleLength;
       synchronized(datanode.data) { 
       synchronized(datanode.data) { 
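Note: the new sendChecksum flag comes with an argument contract in the hunk above: verifying a checksum without also sending it is rejected up front. A minimal sketch of the same check with Guava's Preconditions, which the patch already imports; ChecksumArgsCheck is a made-up wrapper for illustration only:

    import com.google.common.base.Preconditions;

    // Reject the combination (verify=true, send=false) early with an
    // IllegalArgumentException rather than failing later in the read path.
    public class ChecksumArgsCheck {
      static void check(boolean verifyChecksum, boolean sendChecksum) {
        if (verifyChecksum) {
          Preconditions.checkArgument(sendChecksum,
              "If verifying checksum, currently must also send it.");
        }
      }

      public static void main(String[] args) {
        check(true, true);   // fine: verify and send
        check(false, false); // fine: neither
        check(true, false);  // throws IllegalArgumentException
      }
    }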
@@ -213,29 +224,37 @@ class BlockSender implements java.io.Closeable {
        * False,  True: will verify checksum
        * False, False: throws IOException file not found
        */
-      DataChecksum csum;
-      final InputStream metaIn = datanode.data.getMetaDataInputStream(block);
-      if (!corruptChecksumOk || metaIn != null) {
-      	if (metaIn == null) {
-          //need checksum but meta-data not found
-          throw new FileNotFoundException("Meta-data not found for " + block);
-        } 
-      	
-        checksumIn = new DataInputStream(
-            new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
-
-        // read and handle the common header here. For now just a version
-        BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
-        short version = header.getVersion();
-        if (version != BlockMetadataHeader.VERSION) {
-          LOG.warn("Wrong version (" + version + ") for metadata file for "
-              + block + " ignoring ...");
+      DataChecksum csum = null;
+      if (verifyChecksum || sendChecksum) {
+        final InputStream metaIn = datanode.data.getMetaDataInputStream(block);
+        if (!corruptChecksumOk || metaIn != null) {
+          if (metaIn == null) {
+            //need checksum but meta-data not found
+            throw new FileNotFoundException("Meta-data not found for " + block);
+          }
+
+          checksumIn = new DataInputStream(
+              new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
+  
+          // read and handle the common header here. For now just a version
+          BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
+          short version = header.getVersion();
+          if (version != BlockMetadataHeader.VERSION) {
+            LOG.warn("Wrong version (" + version + ") for metadata file for "
+                + block + " ignoring ...");
+          }
+          csum = header.getChecksum();
+        } else {
+          LOG.warn("Could not find metadata file for " + block);
         }
-        csum = header.getChecksum();
-      } else {
-        LOG.warn("Could not find metadata file for " + block);
-        // This only decides the buffer size. Use BUFFER_SIZE?
-        csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 16 * 1024);
+      }
+      if (csum == null) {
+        // The number of bytes per checksum here determines the alignment
+        // of reads: we always start reading at a checksum chunk boundary,
+        // even if the checksum type is NULL. So, choosing too big of a value
+        // would risk sending too much unnecessary data. 512 (1 disk sector)
+        // is likely to result in minimal extra IO.
+        csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512);
       }
 
       /*
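Note: the fallback above chooses 512 bytes per (NULL) checksum chunk because reads are aligned to chunk boundaries, so a large chunk size pays for extra bytes on unaligned requests. A quick standalone illustration of that arithmetic; the offset is an arbitrary example, not taken from the patch:

    // Reads start at a checksum-chunk boundary, so an unaligned request pays
    // for the bytes between that boundary and the requested offset.
    public class ChunkAlignmentDemo {
      static long wastedBytes(long requestOffset, int bytesPerChecksum) {
        long chunkStart = (requestOffset / bytesPerChecksum) * bytesPerChecksum;
        return requestOffset - chunkStart;  // extra bytes read/sent before the data
      }

      public static void main(String[] args) {
        long offset = 100_000;  // arbitrary unaligned offset within a block
        System.out.println("512 B chunks -> extra bytes: " + wastedBytes(offset, 512));       // 160
        System.out.println("16 KB chunks -> extra bytes: " + wastedBytes(offset, 16 * 1024)); // 1696
      }
    }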
@@ -649,7 +668,7 @@ class BlockSender implements java.io.Closeable {
 
       ByteBuffer pktBuf = ByteBuffer.allocate(pktBufSize);
 
-      while (endOffset > offset) {
+      while (endOffset > offset && !Thread.currentThread().isInterrupted()) {
         manageOsCache();
         long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks,
             transferTo, throttler);
@@ -657,16 +676,19 @@ class BlockSender implements java.io.Closeable {
         totalRead += len + (numberOfChunks(len) * checksumSize);
         seqno++;
       }
-      try {
-        // send an empty packet to mark the end of the block
-        sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
-            throttler);
-        out.flush();
-      } catch (IOException e) { //socket error
-        throw ioeToSocketException(e);
-      }
+      // If this thread was interrupted, then it did not send the full block.
+      if (!Thread.currentThread().isInterrupted()) {
+        try {
+          // send an empty packet to mark the end of the block
+          sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
+              throttler);
+          out.flush();
+        } catch (IOException e) { //socket error
+          throw ioeToSocketException(e);
+        }
 
-      sentEntireByteRange = true;
+        sentEntireByteRange = true;
+      }
     } finally {
       if (clientTraceFmt != null) {
         final long endTime = System.nanoTime();
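Note: the send loop now checks the thread's interrupt status between packets and only marks the byte range as fully sent if it was never interrupted. A generic sketch of that cooperative-interrupt pattern; these are illustrative classes, not the HDFS ones:

    // Stop between units of work when interrupted, and only set the
    // "completed" flag if the loop ran to the end uninterrupted.
    public class InterruptibleSender {
      private boolean sentEverything = false;

      void sendAll(int totalPackets) {
        int next = 0;
        while (next < totalPackets && !Thread.currentThread().isInterrupted()) {
          sendOnePacket(next++);           // one bounded unit of work per iteration
        }
        if (!Thread.currentThread().isInterrupted()) {
          sendOnePacket(-1);               // trailer, e.g. an empty end-of-block packet
          sentEverything = true;           // only claim success if never interrupted
        }
      }

      boolean sentEverything() {
        return sentEverything;
      }

      private void sendOnePacket(int seqno) {
        // stand-in for the real packet send
      }
    }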

+ 20 - 26
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -98,7 +98,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
@@ -115,6 +114,7 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
@@ -970,29 +970,27 @@ public class DataNode extends Configured
     dnId.setStorageID(createNewStorageId(dnId.getXferPort()));
   }
   
+  /**
+   * @return a unique storage ID of form "DS-randInt-ipaddr-port-timestamp"
+   */
   static String createNewStorageId(int port) {
-    /* Return 
-     * "DS-randInt-ipaddr-currentTimeMillis"
-     * It is considered extermely rare for all these numbers to match
-     * on a different machine accidentally for the following 
-     * a) SecureRandom(INT_MAX) is pretty much random (1 in 2 billion), and
-     * b) Good chance ip address would be different, and
-     * c) Even on the same machine, Datanode is designed to use different ports.
-     * d) Good chance that these are started at different times.
-     * For a confict to occur all the 4 above have to match!.
-     * The format of this string can be changed anytime in future without
-     * affecting its functionality.
-     */
+    // It is unlikely that we will create a non-unique storage ID
+    // for the following reasons:
+    // a) SecureRandom is a cryptographically strong random number generator
+    // b) IP addresses will likely differ on different hosts
+    // c) DataNode xfer ports will differ on the same host
+    // d) StorageIDs will likely be generated at different times (in ms)
+    // A conflict requires that all four conditions are violated.
+    // NB: The format of this string can be changed in the future without
+    // requiring that old SotrageIDs be updated.
     String ip = "unknownIP";
     try {
       ip = DNS.getDefaultIP("default");
     } catch (UnknownHostException ignored) {
-      LOG.warn("Could not find ip address of \"default\" inteface.");
+      LOG.warn("Could not find an IP address for the \"default\" inteface.");
     }
-    
     int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
-    return "DS-" + rand + "-" + ip + "-" + port + "-"
-        + Time.now();
+    return "DS-" + rand + "-" + ip + "-" + port + "-" + Time.now();
   }
   
   /** Ensure the authentication method is kerberos */
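Note: the rewritten comment above spells out why the generated storage IDs are effectively unique (random int, IP, port, and millisecond timestamp would all have to collide). A standalone sketch of the same ID shape using only JDK calls; the real code uses Hadoop's DNS.getDefaultIP and Time.now helpers, and the error handling here is simplified:

    import java.net.InetAddress;
    import java.net.UnknownHostException;
    import java.security.SecureRandom;

    public class StorageIdSketch {
      public static String newStorageId(int xferPort) {
        String ip = "unknownIP";
        try {
          ip = InetAddress.getLocalHost().getHostAddress();
        } catch (UnknownHostException ignored) {
          // fall back to the placeholder, mirroring the warn-and-continue behavior
        }
        int rand = new SecureRandom().nextInt(Integer.MAX_VALUE);
        // random int + IP + port + millisecond timestamp: all four components
        // would have to match for two DataNodes to produce the same ID
        return "DS-" + rand + "-" + ip + "-" + xferPort + "-" + System.currentTimeMillis();
      }

      public static void main(String[] args) {
        System.out.println(newStorageId(50010));
      }
    }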
@@ -1443,7 +1441,7 @@ public class DataNode extends Configured
             HdfsConstants.SMALL_BUFFER_SIZE));
         in = new DataInputStream(unbufIn);
         blockSender = new BlockSender(b, 0, b.getNumBytes(), 
-            false, false, DataNode.this, null);
+            false, false, true, DataNode.this, null);
         DatanodeInfo srcNode = new DatanodeInfo(bpReg);
 
         //
@@ -1468,7 +1466,7 @@ public class DataNode extends Configured
         // read ack
         if (isClient) {
           DNTransferAckProto closeAck = DNTransferAckProto.parseFrom(
-              HdfsProtoUtil.vintPrefixed(in));
+              PBHelper.vintPrefixed(in));
           if (LOG.isDebugEnabled()) {
             LOG.debug(getClass().getSimpleName() + ": close-ack=" + closeAck);
           }
@@ -1908,10 +1906,11 @@ public class DataNode extends Configured
   }
 
   /**
-   * Get namenode corresponding to a block pool
+   * Get the NameNode corresponding to the given block pool.
+   *
    * @param bpid Block pool Id
    * @return Namenode corresponding to the bpid
-   * @throws IOException
+   * @throws IOException if unable to get the corresponding NameNode
    */
   public DatanodeProtocolClientSideTranslatorPB getActiveNamenodeForBP(String bpid)
       throws IOException {
@@ -1935,11 +1934,6 @@ public class DataNode extends Configured
     final String bpid = block.getBlockPoolId();
     DatanodeProtocolClientSideTranslatorPB nn =
       getActiveNamenodeForBP(block.getBlockPoolId());
-    if (nn == null) {
-      throw new IOException(
-          "Unable to synchronize block " + rBlock + ", since this DN "
-          + " has not acknowledged any NN as active.");
-    }
     
     long recoveryId = rBlock.getNewGenerationStamp();
     if (LOG.isDebugEnabled()) {
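Note: the updated javadoc says getActiveNamenodeForBP throws IOException when it cannot find the corresponding NameNode, which is why the caller-side null check is removed in the second hunk. An illustrative, non-HDFS sketch of that "throw instead of returning null" lookup style; the registry class and its fields are made up for the example:

    import java.io.IOException;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // A lookup that throws instead of returning null, so callers (like the
    // block-recovery path above) do not need their own null checks.
    public class ActiveNamenodeRegistry {
      private final Map<String, String> activeByBlockPool = new ConcurrentHashMap<>();

      public void recordActive(String bpid, String namenodeAddress) {
        activeByBlockPool.put(bpid, namenodeAddress);
      }

      public String getActiveForBlockPool(String bpid) throws IOException {
        String nn = activeByBlockPool.get(bpid);
        if (nn == null) {
          throw new IOException("No active NameNode acknowledged for block pool " + bpid);
        }
        return nn;
      }
    }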

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -62,7 +62,7 @@ import org.apache.hadoop.util.DiskChecker;
  */
 @InterfaceAudience.Private
 public class DataStorage extends Storage {
-  // Constants
+
   public final static String BLOCK_SUBDIR_PREFIX = "subdir";
   final static String BLOCK_FILE_PREFIX = "blk_";
   final static String COPY_FILE_PREFIX = "dncp_";
@@ -71,13 +71,13 @@ public class DataStorage extends Storage {
   public final static String STORAGE_DIR_FINALIZED = "finalized";
   public final static String STORAGE_DIR_TMP = "tmp";
 
-  /** Access to this variable is guarded by "this" */
+  /** Unique storage ID. {@see DataNode#createNewStorageId(int)} for details */
   private String storageID;
 
-  // flag to ensure initialzing storage occurs only once
-  private boolean initilized = false;
+  // Flag to ensure we only initialize storage once
+  private boolean initialized = false;
   
-  // BlockPoolStorage is map of <Block pool Id, BlockPoolStorage>
+  // Maps block pool IDs to block pool storage
   private Map<String, BlockPoolSliceStorage> bpStorageMap
       = Collections.synchronizedMap(new HashMap<String, BlockPoolSliceStorage>());
 
@@ -130,7 +130,7 @@ public class DataStorage extends Storage {
   synchronized void recoverTransitionRead(DataNode datanode,
       NamespaceInfo nsInfo, Collection<File> dataDirs, StartupOption startOpt)
       throws IOException {
-    if (initilized) {
+    if (initialized) {
       // DN storage has been initialized, no need to do anything
       return;
     }
@@ -200,7 +200,7 @@ public class DataStorage extends Storage {
     this.writeAll();
     
     // 4. mark DN storage is initilized
-    this.initilized = true;
+    this.initialized = true;
   }
 
   /**
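Note: the renamed initialized flag guards one-time storage setup inside the synchronized recoverTransitionRead. A generic sketch of that guarded one-time initialization pattern; the class and method names here are illustrative:

    // The synchronized method checks a flag, does the expensive setup once,
    // and only sets the flag after setup succeeds, so later calls return fast.
    public class OneTimeInit {
      private boolean initialized = false;

      public synchronized void initialize() {
        if (initialized) {
          return;             // already set up, nothing to do
        }
        doExpensiveSetup();    // e.g. analyze and recover storage directories
        initialized = true;    // only mark done after setup succeeds
      }

      private void doExpensiveSetup() {
        // stand-in for the real recovery/format work
      }
    }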

Some files were not shown because too many files changed in this diff