
Merge trunk into HDFS-3077 branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3077@1377092 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers committed 12 years ago
Commit a8ff292669
100 files changed, with 3469 insertions and 813 deletions
  1. +1 -1      hadoop-common-project/hadoop-annotations/pom.xml
  2. +7 -2      hadoop-common-project/hadoop-auth-examples/pom.xml
  3. +3 -2      hadoop-common-project/hadoop-auth/pom.xml
  4. +83 -5     hadoop-common-project/hadoop-common/CHANGES.txt
  5. +12 -61    hadoop-common-project/hadoop-common/pom.xml
  6. +45 -33    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  7. +21 -5     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
  8. +10 -6     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
  9. +11 -0     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  10. +6 -4     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
  11. +5 -1     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
  12. +14 -4    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
  13. +133 -49  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  14. +6 -26    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
  15. +3 -2     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
  16. +18 -1    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
  17. +41 -0    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32CastagnoliFileChecksum.java
  18. +58 -6    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
  19. +40 -0    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32GzipFileChecksum.java
  20. +128 -1   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
  21. +0 -5     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
  22. +31 -14   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
  23. +12 -1    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FtpConfigKeys.java
  24. +13 -2    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/LocalConfigKeys.java
  25. +9 -9     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
  26. +3 -2     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
  27. +13 -62   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  28. +4 -3     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  29. +19 -1    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
  30. +6 -0     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  31. +13 -5    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
  32. +42 -2    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  33. +20 -0    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
  34. +63 -0    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
  35. +63 -0    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java
  36. +70 -12   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  37. +11 -1    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
  38. +60 -52   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
  39. +69 -14   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
  40. +1 -1     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
  41. +1 -1     hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
  42. +1 -1     hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
  43. +16 -6    hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
  44. +7 -13    hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
  45. +3 -3     hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c
  46. +2 -3     hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
  47. +2 -1     hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
  48. +1 -1     hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
  49. +17 -3    hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  50. +7 -0     hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
  51. +11 -9    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
  52. +11 -9    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
  53. +37 -0    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
  54. +2 -1     hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
  55. +7 -5     hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java
  56. +279 -0   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemTokens.java
  57. +21 -0    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
  58. +70 -0    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsOptions.java
  59. +0 -1     hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java
  60. +12 -8    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
  61. +2 -2     hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
  62. +4 -4     hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
  63. +20 -1    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCommandFactory.java
  64. +38 -40   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
  65. +120 -1   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegationTokenSupport.java
  66. +9 -17    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
  67. +39 -0    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
  68. +1 -0     hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableName.java
  69. +13 -0    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java
  70. +18 -1    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCredentials.java
  71. +105 -0   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
  72. +2 -0     hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java
  73. +79 -4    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
  74. +10 -11   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java
  75. +141 -0   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLineReader.java
  76. +15 -15   hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
  77. +0 -14    hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
  78. +14 -51   hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator.java
  79. +1 -25    hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandler.java
  80. +6 -21    hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java
  81. +3 -2     hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java
  82. +59 -0    hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  83. +79 -25   hadoop-hdfs-project/hadoop-hdfs/pom.xml
  84. +13 -3    hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
  85. +51 -0    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/BlockStorageLocation.java
  86. +3 -2     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
  87. +48 -0    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
  88. +74 -0    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
  89. +49 -0    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java
  90. +1 -1     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
  91. +337 -0   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
  92. +51 -16   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java
  93. +174 -48  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  94. +8 -2     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  95. +53 -2    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  96. +42 -22   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  97. +18 -0    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
  98. +0 -12    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  99. +94 -0    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsBlocksMetadata.java
  100. +1 -11   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java

+ 1 - 1
hadoop-common-project/hadoop-annotations/pom.xml

@@ -34,7 +34,7 @@
     <dependency>
       <groupId>jdiff</groupId>
       <artifactId>jdiff</artifactId>
-      <scope>compile</scope>
+      <scope>provided</scope>
     </dependency>
   </dependencies>


+ 7 - 2
hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -42,15 +42,20 @@
       <artifactId>hadoop-auth</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <scope>compile</scope>
+    </dependency>
     <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
-      <scope>compile</scope>
+      <scope>runtime</scope>
     </dependency>
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-log4j12</artifactId>
-      <scope>compile</scope>
+      <scope>runtime</scope>
     </dependency>
   </dependencies>


+ 3 - 2
hadoop-common-project/hadoop-auth/pom.xml

@@ -38,6 +38,7 @@

   <dependencies>
     <dependency>
+      <!-- Used, even though 'mvn dependency:analyze' doesn't find it -->
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-annotations</artifactId>
       <scope>provided</scope>
@@ -75,12 +76,12 @@
     <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
-      <scope>compile</scope>
+      <scope>runtime</scope>
     </dependency>
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-log4j12</artifactId>
-      <scope>compile</scope>
+      <scope>runtime</scope>
     </dependency>
   </dependencies>


+ 83 - 5
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -91,6 +91,10 @@ Trunk (unreleased changes)
    HADOOP-8624. ProtobufRpcEngine should log all RPCs if TRACE logging is
    enabled (todd)

+    HADOOP-8711. IPC Server supports adding exceptions for which
+    the message is printed and the stack trace is not printed to avoid chatter.
+    (Brandon Li via Suresh)
+
  BUG FIXES

    HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
@@ -198,6 +202,10 @@ Branch-2 ( Unreleased changes )
    HADOOP-8388. Remove unused BlockLocation serialization.
    (Colin Patrick McCabe via eli)

+    HADOOP-8689. Make trash a server side configuration option. (eli)
+
+    HADOOP-8710. Remove ability for users to easily run the trash emptier. (eli)
+
  NEW FEATURES

    HDFS-3042. Automatic failover support for NameNode HA (todd)
@@ -218,6 +226,9 @@ Branch-2 ( Unreleased changes )
    HADOOP-7754. Expose file descriptors from Hadoop-wrapped local
    FileSystems (todd and ahmed via tucu)

+    HADOOP-8240. Add a new API to allow users to specify a checksum type
+    on FileSystem.create(..).  (Kihwal Lee via szetszwo)
+
  IMPROVEMENTS

    HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual
@@ -289,6 +300,20 @@ Branch-2 ( Unreleased changes )

    HADOOP-8687. Upgrade log4j to 1.2.17. (eli)

+    HADOOP-8278. Make sure components declare correct set of dependencies.
+    (tomwhite)
+
+    HADOOP-8700.  Use enum to define the checksum constants in DataChecksum.
+    (szetszwo)
+
+    HADOOP-8686. Fix warnings in native code. (Colin Patrick McCabe via eli)
+
+    HADOOP-8239. Add subclasses of MD5MD5CRC32FileChecksum to support file
+    checksum with CRC32C.  (Kihwal Lee via szetszwo)
+
+    HADOOP-8075. Lower native-hadoop library log from info to debug.
+    (Hızır Sefa İrken via eli)
+
  BUG FIXES

    HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
@@ -389,6 +414,31 @@ Branch-2 ( Unreleased changes )
    HADOOP-8659. Native libraries must build with soft-float ABI for Oracle JVM
    on ARM. (Trevor Robinson via todd)

+    HADOOP-8654. TextInputFormat delimiter bug (Gelesh and Jason Lowe via
+    bobby)
+
+    HADOOP-8614. IOUtils#skipFully hangs forever on EOF.
+    (Colin Patrick McCabe via eli)
+
+    HADOOP-8720. TestLocalFileSystem should use test root subdirectory.
+    (Vlad Rozov via eli)
+
+    HADOOP-8721. ZKFC should not retry 45 times when attempting a graceful
+    fence during a failover. (Vinayakumar B via atm)
+
+    HADOOP-8632. Configuration leaking class-loaders (Costin Leau via bobby)
+
+    HADOOP-4572. Can not access user logs - Jetty is not configured by default
+    to serve aliases/symlinks (ahmed via tucu)
+
+    HADOOP-8660. TestPseudoAuthenticator failing with NPE. (tucu)
+
+    HADOOP-8699. some common testcases create core-site.xml in test-classes
+    making other testcases to fail. (tucu)
+
+    HADOOP-8031. Configuration class fails to find embedded .jar resources;
+    should use URL.openStream() (genman via tucu)
+
  BREAKDOWN OF HDFS-3042 SUBTASKS

    HADOOP-8220. ZKFailoverController doesn't handle failure to become active
@@ -421,11 +471,6 @@ Branch-2 ( Unreleased changes )

    HADOOP-8405. ZKFC tests leak ZK instances. (todd)

-    HADOOP-8660. TestPseudoAuthenticator failing with NPE. (tucu)
-
-    HADOOP-8699. some common testcases create core-site.xml in test-classes
-    making other testcases to fail. (tucu)
-
Release 2.0.0-alpha - 05-23-2012

  INCOMPATIBLE CHANGES
@@ -793,10 +838,15 @@ Release 2.0.0-alpha - 05-23-2012
    HADOOP-7868. Hadoop native fails to compile when default linker
    option is -Wl,--as-needed. (Trevor Robinson via eli)

+    HADOOP-8655. Fix TextInputFormat for large delimiters. (Gelesh via
+    bobby)
+
Release 0.23.3 - UNRELEASED

  INCOMPATIBLE CHANGES

+    HADOOP-7967. Need generalized multi-token filesystem support (daryn)
+
  NEW FEATURES

  IMPROVEMENTS
@@ -899,6 +949,34 @@ Release 0.23.3 - UNRELEASED
    HADOOP-8633. Interrupted FsShell copies may leave tmp files (Daryn Sharp
    via tgraves)

+    HADOOP-8703. distcpV2: turn CRC checking off for 0 byte size (Dave
+    Thompson via bobby)
+
+    HADOOP-8390. TestFileSystemCanonicalization fails with JDK7  (Trevor
+    Robinson via tgraves)
+
+    HADOOP-8692. TestLocalDirAllocator fails intermittently with JDK7
+    (Trevor Robinson via tgraves)
+
+    HADOOP-8693. TestSecurityUtil fails intermittently with JDK7 (Trevor
+    Robinson via tgraves)
+
+    HADOOP-8697. TestWritableName fails intermittently with JDK7 (Trevor
+    Robinson via tgraves)
+
+    HADOOP-8695. TestPathData fails intermittently with JDK7 (Trevor
+    Robinson via tgraves)
+
+    HADOOP-8611. Allow fall-back to the shell-based implementation when
+    JNI-based users-group mapping fails (Robert Parker via bobby)
+
+    HADOOP-8225. DistCp fails when invoked by Oozie (daryn via bobby)
+
+    HADOOP-8709. globStatus changed behavior from 0.20/1.x (Jason Lowe via
+    bobby)
+
+    HADOOP-8725. MR is broken when security is off (daryn via bobby)
+
Release 0.23.2 - UNRELEASED

  INCOMPATIBLE CHANGES

+ 12 - 61
hadoop-common-project/hadoop-common/pom.xml

@@ -74,13 +74,13 @@
      <scope>compile</scope>
    </dependency>
    <dependency>
-      <groupId>commons-net</groupId>
-      <artifactId>commons-net</artifactId>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
      <scope>compile</scope>
    </dependency>
    <dependency>
-      <groupId>commons-io</groupId>
-      <artifactId>commons-io</artifactId>
+      <groupId>commons-net</groupId>
+      <artifactId>commons-net</artifactId>
      <scope>compile</scope>
    </dependency>
    <dependency>
@@ -98,17 +98,13 @@
      <artifactId>jetty-util</artifactId>
      <scope>compile</scope>
    </dependency>
-    <dependency>
-      <groupId>asm</groupId>
-      <artifactId>asm</artifactId>
-      <scope>compile</scope>
-    </dependency>
    <dependency>
      <groupId>com.sun.jersey</groupId>
      <artifactId>jersey-core</artifactId>
      <scope>compile</scope>
    </dependency>
    <dependency>
+      <!-- Used, even though 'mvn dependency:analyze' doesn't find it -->
      <groupId>com.sun.jersey</groupId>
      <artifactId>jersey-json</artifactId>
      <scope>compile</scope>
@@ -121,33 +117,28 @@
    <dependency>
      <groupId>tomcat</groupId>
      <artifactId>jasper-compiler</artifactId>
-      <scope>compile</scope>
+      <scope>runtime</scope>
    </dependency>
    <dependency>
      <groupId>tomcat</groupId>
      <artifactId>jasper-runtime</artifactId>
-      <scope>compile</scope>
+      <scope>runtime</scope>
    </dependency>
    <dependency>
      <groupId>javax.servlet.jsp</groupId>
      <artifactId>jsp-api</artifactId>
-      <scope>compile</scope>
+      <scope>runtime</scope>
    </dependency>
    <dependency>
      <groupId>commons-el</groupId>
      <artifactId>commons-el</artifactId>
-      <scope>compile</scope>
+      <scope>runtime</scope>
    </dependency>
    <dependency>
      <groupId>commons-logging</groupId>
      <artifactId>commons-logging</artifactId>
      <scope>compile</scope>
    </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging-api</artifactId>
-      <scope>compile</scope>
-    </dependency>
    <dependency>
      <groupId>log4j</groupId>
      <artifactId>log4j</artifactId>
@@ -158,26 +149,6 @@
      <artifactId>jets3t</artifactId>
      <scope>compile</scope>
    </dependency>
-    <dependency>
-      <groupId>org.apache.mina</groupId>
-      <artifactId>mina-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.ftpserver</groupId>
-      <artifactId>ftplet-api</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.ftpserver</groupId>
-      <artifactId>ftpserver-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.ftpserver</groupId>
-      <artifactId>ftpserver-deprecated</artifactId>
-      <scope>test</scope>
-    </dependency>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
@@ -188,11 +159,6 @@
      <artifactId>commons-lang</artifactId>
      <scope>compile</scope>
    </dependency>
-    <dependency>
-      <groupId>commons-collections</groupId>
-      <artifactId>commons-collections</artifactId>
-      <scope>compile</scope>
-    </dependency>
    <dependency>
      <groupId>commons-configuration</groupId>
      <artifactId>commons-configuration</artifactId>
@@ -206,16 +172,11 @@
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-log4j12</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jdt</groupId>
-      <artifactId>core</artifactId>
-      <scope>compile</scope>
+      <scope>runtime</scope>
    </dependency>
    <dependency>
-      <groupId>oro</groupId>
-      <artifactId>oro</artifactId>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-core-asl</artifactId>
      <scope>compile</scope>
    </dependency>
    <dependency>
@@ -223,11 +184,6 @@
      <artifactId>jackson-mapper-asl</artifactId>
      <scope>compile</scope>
    </dependency>
-    <dependency>
-      <groupId>org.aspectj</groupId>
-      <artifactId>aspectjrt</artifactId>
-      <scope>compile</scope>
-    </dependency>
    <dependency>
      <groupId>org.mockito</groupId>
      <artifactId>mockito-all</artifactId>
@@ -258,11 +214,6 @@
      <artifactId>hadoop-auth</artifactId>
      <scope>compile</scope>
    </dependency>
-    <dependency>
-      <groupId>com.googlecode.json-simple</groupId>
-      <artifactId>json-simple</artifactId>
-      <scope>compile</scope>
-    </dependency>
    <dependency>
      <groupId>com.jcraft</groupId>
      <artifactId>jsch</artifactId>

+ 45 - 33
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -30,6 +30,7 @@ import java.io.OutputStream;
 import java.io.OutputStreamWriter;
 import java.io.Reader;
 import java.io.Writer;
+import java.lang.ref.WeakReference;
 import java.net.InetSocketAddress;
 import java.net.URL;
 import java.util.ArrayList;
@@ -219,8 +220,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   private static final CopyOnWriteArrayList<String> defaultResources =
     new CopyOnWriteArrayList<String>();

-  private static final Map<ClassLoader, Map<String, Class<?>>>
-    CACHE_CLASSES = new WeakHashMap<ClassLoader, Map<String, Class<?>>>();
+  private static final Map<ClassLoader, Map<String, WeakReference<Class<?>>>>
+    CACHE_CLASSES = new WeakHashMap<ClassLoader, Map<String, WeakReference<Class<?>>>>();

   /**
    * Sentinel value to store negative cache results in {@link #CACHE_CLASSES}.
@@ -1531,28 +1532,33 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @return the class object, or null if it could not be found.
    */
   public Class<?> getClassByNameOrNull(String name) {
-    Map<String, Class<?>> map;
+    Map<String, WeakReference<Class<?>>> map;

     synchronized (CACHE_CLASSES) {
       map = CACHE_CLASSES.get(classLoader);
       if (map == null) {
         map = Collections.synchronizedMap(
-          new WeakHashMap<String, Class<?>>());
+          new WeakHashMap<String, WeakReference<Class<?>>>());
         CACHE_CLASSES.put(classLoader, map);
       }
     }

-    Class<?> clazz = map.get(name);
+    Class<?> clazz = null;
+    WeakReference<Class<?>> ref = map.get(name);
+    if (ref != null) {
+       clazz = ref.get();
+    }
+
     if (clazz == null) {
       try {
         clazz = Class.forName(name, true, classLoader);
       } catch (ClassNotFoundException e) {
         // Leave a marker that the class isn't found
-        map.put(name, NEGATIVE_CACHE_SENTINEL);
+        map.put(name, new WeakReference<Class<?>>(NEGATIVE_CACHE_SENTINEL));
         return null;
       }
       // two putters can race here, but they'll put the same class
-      map.put(name, clazz);
+      map.put(name, new WeakReference<Class<?>>(clazz));
       return clazz;
     } else if (clazz == NEGATIVE_CACHE_SENTINEL) {
       return null; // not found
@@ -1856,6 +1862,32 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     return result.entrySet().iterator();
   }

+  private Document parse(DocumentBuilder builder, URL url)
+      throws IOException, SAXException {
+    if (!quietmode) {
+      LOG.info("parsing URL " + url);
+    }
+    if (url == null) {
+      return null;
+    }
+    return parse(builder, url.openStream());
+  }
+
+  private Document parse(DocumentBuilder builder, InputStream is)
+      throws IOException, SAXException {
+    if (!quietmode) {
+      LOG.info("parsing input stream " + is);
+    }
+    if (is == null) {
+      return null;
+    }
+    try {
+      return builder.parse(is);
+    } finally {
+      is.close();
+    }
+  }
+
   private void loadResources(Properties properties,
                              ArrayList<Resource> resources,
                              boolean quiet) {
@@ -1905,21 +1937,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       boolean returnCachedProperties = false;

       if (resource instanceof URL) {                  // an URL resource
-        URL url = (URL)resource;
-        if (url != null) {
-          if (!quiet) {
-            LOG.info("parsing " + url);
-          }
-          doc = builder.parse(url.toString());
-        }
+        doc = parse(builder, (URL)resource);
       } else if (resource instanceof String) {        // a CLASSPATH resource
         URL url = getResource((String)resource);
-        if (url != null) {
-          if (!quiet) {
-            LOG.info("parsing " + url);
-          }
-          doc = builder.parse(url.toString());
-        }
+        doc = parse(builder, url);
       } else if (resource instanceof Path) {          // a file resource
         // Can't use FileSystem API or we get an infinite loop
         // since FileSystem uses Configuration API.  Use java.io.File instead.
@@ -1927,22 +1948,13 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
           .getAbsoluteFile();
         if (file.exists()) {
           if (!quiet) {
-            LOG.info("parsing " + file);
-          }
-          InputStream in = new BufferedInputStream(new FileInputStream(file));
-          try {
-            doc = builder.parse(in);
-          } finally {
-            in.close();
+            LOG.info("parsing File " + file);
           }
+          doc = parse(builder, new BufferedInputStream(new FileInputStream(file)));
         }
       } else if (resource instanceof InputStream) {
-        try {
-          doc = builder.parse((InputStream)resource);
-          returnCachedProperties = true;
-        } finally {
-          ((InputStream)resource).close();
-        }
+        doc = parse(builder, (InputStream) resource);
+        returnCachedProperties = true;
       } else if (resource instanceof Properties) {
         overlay(properties, (Properties)resource);
       } else if (resource instanceof Element) {

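The hunk above does two things: the per-ClassLoader class cache now holds WeakReference<Class<?>> values, so cached classes no longer pin a ClassLoader in memory (HADOOP-8632), and configuration resources are parsed from URL.openStream() rather than from a stringified URL, which is what HADOOP-8031 needed for resources embedded inside jars. The following standalone sketch, not part of this commit, illustrates the second point outside of Hadoop; it assumes a core-site.xml is present on the classpath and uses only JDK APIs.

import java.io.InputStream;
import java.net.URL;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;

import org.w3c.dom.Document;

public class EmbeddedResourceParse {
  public static void main(String[] args) throws Exception {
    DocumentBuilder builder =
        DocumentBuilderFactory.newInstance().newDocumentBuilder();

    // May resolve to a jar-embedded location, e.g. "jar:file:/...!/core-site.xml".
    URL url = EmbeddedResourceParse.class.getClassLoader()
        .getResource("core-site.xml");

    // builder.parse(url.toString()) re-resolves the location from a string,
    // which HADOOP-8031 reports as failing for jar-embedded resources.
    // Opening the stream directly, as the new parse() helper does, avoids that.
    InputStream in = url.openStream();
    try {
      Document doc = builder.parse(in);
      System.out.println("root element: "
          + doc.getDocumentElement().getNodeName());
    } finally {
      in.close();
    }
  }
}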
+ 21 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -46,6 +47,7 @@ import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;

 /**
@@ -467,6 +469,7 @@ public abstract class AbstractFileSystem {
     short replication = -1;
     long blockSize = -1;
     int bytesPerChecksum = -1;
+    ChecksumOpt checksumOpt = null;
     FsPermission permission = null;
     Progressable progress = null;
     Boolean createParent = null;
@@ -496,6 +499,12 @@
               "BytesPerChecksum option is set multiple times");
         }
         bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
+      } else if (CreateOpts.ChecksumParam.class.isInstance(iOpt)) {
+        if (checksumOpt != null) {
+          throw new  HadoopIllegalArgumentException(
+              "CreateChecksumType option is set multiple times");
+        }
+        checksumOpt = ((CreateOpts.ChecksumParam) iOpt).getValue();
       } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
         if (permission != null) {
           throw new HadoopIllegalArgumentException(
@@ -533,9 +542,16 @@
     if (blockSize == -1) {
       blockSize = ssDef.getBlockSize();
     }
-    if (bytesPerChecksum == -1) {
-      bytesPerChecksum = ssDef.getBytesPerChecksum();
-    }
+
+    // Create a checksum option honoring user input as much as possible.
+    // If bytesPerChecksum is specified, it will override the one set in
+    // checksumOpt. Any missing value will be filled in using the default.
+    ChecksumOpt defaultOpt = new ChecksumOpt(
+        ssDef.getChecksumType(),
+        ssDef.getBytesPerChecksum());
+    checksumOpt = ChecksumOpt.processChecksumOpt(defaultOpt,
+        checksumOpt, bytesPerChecksum);
+
     if (bufferSize == -1) {
       bufferSize = ssDef.getFileBufferSize();
     }
@@ -552,7 +568,7 @@
     }

     return this.createInternal(f, createFlag, permission, bufferSize,
-      replication, blockSize, progress, bytesPerChecksum, createParent);
+      replication, blockSize, progress, checksumOpt, createParent);
   }

   /**
@@ -563,7 +579,7 @@
   public abstract FSDataOutputStream createInternal(Path f,
       EnumSet<CreateFlag> flag, FsPermission absolutePermission,
       int bufferSize, short replication, long blockSize, Progressable progress,
-      int bytesPerChecksum, boolean createParent)
+      ChecksumOpt checksumOpt, boolean createParent)
       throws AccessControlException, FileAlreadyExistsException,
       FileNotFoundException, ParentNotDirectoryException,
       UnsupportedFileSystemException, UnresolvedLinkException, IOException;

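With this change the create path resolves a full ChecksumOpt instead of a bare bytesPerChecksum: the server defaults supply both a checksum type and a block size, and ChecksumOpt.processChecksumOpt() merges them with whatever the caller provided, letting an explicit bytesPerChecksum override the value inside the caller's option. A rough, hypothetical sketch of that merge follows; the CRC32 type and 512-byte value are illustrative, not taken from this commit.

import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumOptMergeSketch {
  public static void main(String[] args) {
    // Stand-in for what FsServerDefaults would supply (illustrative values).
    ChecksumOpt serverDefault = new ChecksumOpt(DataChecksum.Type.CRC32, 512);

    // What the caller passed via CreateOpts.ChecksumParam; null means unset.
    ChecksumOpt userOpt = null;

    // Legacy CreateOpts.BytesPerChecksum value; -1 means it was not supplied.
    int legacyBytesPerChecksum = -1;

    // Merge as in createInternal(): user values win where present, a legacy
    // bytesPerChecksum overrides the one in userOpt, gaps come from defaults.
    ChecksumOpt resolved = ChecksumOpt.processChecksumOpt(
        serverDefault, userOpt, legacyBytesPerChecksum);

    System.out.println("resolved checksum option: " + resolved);
  }
}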
+ 10 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java

@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.PureJavaCrc32;
@@ -324,13 +325,17 @@ public abstract class ChecksumFs extends FilterFs {
       final EnumSet<CreateFlag> createFlag,
       final FsPermission absolutePermission, final int bufferSize,
       final short replication, final long blockSize,
-      final Progressable progress, final int bytesPerChecksum,
+      final Progressable progress, final ChecksumOpt checksumOpt,
       final boolean createParent) throws IOException {
       super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);

+      // checksumOpt is passed down to the raw fs. Unless the raw fs
+      // implements checksumming internally, checksumOpt will be ignored.
+      // If the raw fs does checksum internally, we will end up with
+      // two layers of checksumming, i.e. checksumming the checksum file.
       this.datas = fs.getRawFs().createInternal(file, createFlag,
           absolutePermission, bufferSize, replication, blockSize, progress,
-           bytesPerChecksum,  createParent);
+           checksumOpt,  createParent);

       // Now create the chekcsumfile; adjust the buffsize
       int bytesPerSum = fs.getBytesPerSum();
@@ -338,7 +343,7 @@ public abstract class ChecksumFs extends FilterFs {
       this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
           EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
           absolutePermission, sumBufferSize, replication, blockSize, progress,
-          bytesPerChecksum, createParent);
+          checksumOpt, createParent);
       sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
       sums.writeInt(bytesPerSum);
     }
@@ -361,12 +366,11 @@ public abstract class ChecksumFs extends FilterFs {
   public FSDataOutputStream createInternal(Path f,
       EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
       int bufferSize, short replication, long blockSize, Progressable progress,
-      int bytesPerChecksum, boolean createParent) throws IOException {
-
+      ChecksumOpt checksumOpt, boolean createParent) throws IOException {
     final FSDataOutputStream out = new FSDataOutputStream(
         new ChecksumFSOutputSummer(this, f, createFlag, absolutePermission,
             bufferSize, replication, blockSize, progress,
-            bytesPerChecksum,  createParent), null);
+            checksumOpt,  createParent), null);
     return out;
   }


+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -154,6 +154,11 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
     "ha.failover-controller.graceful-fence.rpc-timeout.ms";
   public static final int HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT = 5000;

+  /* FC connection retries for graceful fencing */
+  public static final String HA_FC_GRACEFUL_FENCE_CONNECTION_RETRIES =
+      "ha.failover-controller.graceful-fence.connection.retries";
+  public static final int HA_FC_GRACEFUL_FENCE_CONNECTION_RETRIES_DEFAULT = 1;
+
   /* Timeout that the CLI (manual) FC waits for monitorHealth, getServiceState */
   public static final String HA_FC_CLI_CHECK_TIMEOUT_KEY =
     "ha.failover-controller.cli-check.rpc-timeout.ms";
@@ -167,6 +172,12 @@
   public static final String DEFAULT_HADOOP_HTTP_STATIC_USER =
     "dr.who";

+  /** Enable/Disable aliases serving from jetty */
+  public static final String HADOOP_JETTY_LOGS_SERVE_ALIASES =
+    "hadoop.jetty.logs.serve.aliases";
+  public static final boolean DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES =
+    true;
+
   /* Path to the Kerberos ticket cache.  Setting this will force
    * UserGroupInformation to use only this ticket cache file when creating a
    * FileSystem instance.

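Both additions are plain configuration keys, so callers read them through Configuration the usual way. A small hypothetical sketch; the key names and defaults come from the hunk above, everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class NewCommonKeysSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Connection retries used when the failover controller attempts a
    // graceful fence (see HADOOP-8721 in the CHANGES.txt hunk above).
    int fenceRetries = conf.getInt(
        CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_CONNECTION_RETRIES,
        CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_CONNECTION_RETRIES_DEFAULT);

    // Whether Jetty may serve aliases/symlinks, e.g. under the log directory
    // (see HADOOP-4572 in the CHANGES.txt hunk above).
    boolean serveAliases = conf.getBoolean(
        CommonConfigurationKeys.HADOOP_JETTY_LOGS_SERVE_ALIASES,
        CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES);

    System.out.println("graceful-fence connection retries: " + fenceRetries);
    System.out.println("serve jetty log aliases: " + serveAliases);
  }
}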
+ 6 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java

@@ -21,12 +21,14 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.token.Token;
@@ -61,7 +63,7 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
   public FSDataOutputStream createInternal (Path f,
       EnumSet<CreateFlag> flag, FsPermission absolutePermission, int bufferSize,
       short replication, long blockSize, Progressable progress,
-      int bytesPerChecksum, boolean createParent) throws IOException {
+      ChecksumOpt checksumOpt, boolean createParent) throws IOException {
     checkPath(f);

     // Default impl assumes that permissions do not matter
@@ -80,8 +82,8 @@
       }
       // parent does exist - go ahead with create of file.
     }
-    return fsImpl.primitiveCreate(f, absolutePermission, flag,
-        bufferSize, replication, blockSize, progress, bytesPerChecksum);
+    return fsImpl.primitiveCreate(f, absolutePermission, flag,
+        bufferSize, replication, blockSize, progress, checksumOpt);
   }

   @Override
@@ -217,6 +219,6 @@

   @Override //AbstractFileSystem
   public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
-    return fsImpl.getDelegationTokens(renewer);
+    return Arrays.asList(fsImpl.addDelegationTokens(renewer, null));
   }
 }

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java

@@ -110,7 +110,11 @@ public class DelegationTokenRenewer<T extends FileSystem & DelegationTokenRenewe
             fs.getRenewToken().renew(fs.getConf());
           } catch (IOException ie) {
             try {
-              fs.setDelegationToken(fs.getDelegationTokens(null).get(0));
+              Token<?>[] tokens = fs.addDelegationTokens(null, null);
+              if (tokens.length == 0) {
+                throw new IOException("addDelegationTokens returned no tokens");
+              }
+              fs.setDelegationToken(tokens[0]);
             } catch (IOException ie2) {
               throw new IOException("Can't renew or get new delegation token ", ie);
             }

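The renewal fallback now goes through the new FileSystem#addDelegationTokens(renewer, credentials) API introduced by HADOOP-7967 (see the FileSystem.java hunk further down) instead of the old getDelegationTokens list. A minimal caller-side sketch follows, assuming a secured cluster so that tokens are actually issued; the renewer name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class AddDelegationTokensSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Credentials is the cache: tokens already present are left alone,
    // missing ones are fetched and added; the return value holds only
    // the newly acquired tokens.
    Credentials creds = new Credentials();
    Token<?>[] newTokens = fs.addDelegationTokens("yarn", creds);

    System.out.println("tokens newly acquired: " + newTokens.length);
    System.out.println("tokens now in credentials: " + creds.numberOfTokens());
  }
}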
+ 14 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -127,7 +127,8 @@ import org.apache.hadoop.util.ShutdownHookManager;
  *  <li> replication factor
  *  <li> block size
  *  <li> buffer size
- *  <li> bytesPerChecksum (if used).
+ *  <li> encryptDataTransfer
+ *  <li> checksum option. (checksumType and  bytesPerChecksum)
  *  </ul>
  *
  * <p>
@@ -613,7 +614,8 @@
   *          <li>BufferSize - buffersize used in FSDataOutputStream
   *          <li>Blocksize - block size for file blocks
   *          <li>ReplicationFactor - replication for blocks
-   *          <li>BytesPerChecksum - bytes per checksum
+   *          <li>ChecksumParam - Checksum parameters. server default is used
+   *          if not specified.
   *          </ul>
   *          </ul>
   *
@@ -2012,7 +2014,11 @@
                     new GlobFilter(components[components.length - 1], filter);
         if (fp.hasPattern()) { // last component has a pattern
           // list parent directories and then glob the results
-          results = listStatus(parentPaths, fp);
+          try {
+            results = listStatus(parentPaths, fp);
+          } catch (FileNotFoundException e) {
+            results = null;
+          }
           hasGlob[0] = true;
         } else { // last component does not have a pattern
           // get all the path names
@@ -2063,7 +2069,11 @@
       }
       GlobFilter fp = new GlobFilter(filePattern[level]);
       if (fp.hasPattern()) {
-        parents = FileUtil.stat2Paths(listStatus(parents, fp));
+        try {
+          parents = FileUtil.stat2Paths(listStatus(parents, fp));
+        } catch (FileNotFoundException e) {
+          parents = null;
+        }
         hasGlob[0] = true;
       } else {
         for (int i = 0; i < parents.length; i++) {

+ 133 - 49
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -45,18 +45,23 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.ShutdownHookManager;
 
 
+import com.google.common.annotations.VisibleForTesting;
+
 /****************************************************************
 /****************************************************************
  * An abstract base class for a fairly generic filesystem.  It
  * An abstract base class for a fairly generic filesystem.  It
  * may be implemented as a distributed filesystem, or as a "local"
  * may be implemented as a distributed filesystem, or as a "local"
@@ -222,15 +227,25 @@ public abstract class FileSystem extends Configured implements Closeable {
 
 
   /**
   /**
    * Get a canonical service name for this file system.  The token cache is
    * Get a canonical service name for this file system.  The token cache is
-   * the only user of this value, and uses it to lookup this filesystem's
-   * service tokens.  The token cache will not attempt to acquire tokens if the
-   * service is null.
+   * the only user of the canonical service name, and uses it to lookup this
+   * filesystem's service tokens.
+   * If file system provides a token of its own then it must have a canonical
+   * name, otherwise canonical name can be null.
+   * 
+   * Default Impl: If the file system has child file systems 
+   * (such as an embedded file system) then it is assumed that the fs has no
+   * tokens of its own and hence returns a null name; otherwise a service
+   * name is built using Uri and port.
+   * 
    * @return a service string that uniquely identifies this file system, null
    *         if the filesystem does not implement tokens
    * @see SecurityUtil#buildDTServiceName(URI, int) 
    */
+  @InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
   public String getCanonicalServiceName() {
-    return SecurityUtil.buildDTServiceName(getUri(), getDefaultPort());
+    return (getChildFileSystems() == null)
+      ? SecurityUtil.buildDTServiceName(getUri(), getDefaultPort())
+      : null;
   }
 
   /** @deprecated call #getUri() instead.*/
@@ -396,68 +411,95 @@ public abstract class FileSystem extends Configured implements Closeable {
   }
     
   /**
-   * Deprecated  - use @link {@link #getDelegationTokens(String)}
    * Get a new delegation token for this file system.
+   * This is an internal method that should have been declared protected
+   * but wasn't historically.
+   * Callers should use {@link #addDelegationTokens(String, Credentials)}
+   * 
    * @param renewer the account name that is allowed to renew the token.
    * @return a new delegation token
    * @throws IOException
    */
-  @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-  @Deprecated
+  @InterfaceAudience.Private()
   public Token<?> getDelegationToken(String renewer) throws IOException {
     return null;
   }
   
   /**
-   * Get one or more delegation tokens associated with the filesystem. Normally
-   * a file system returns a single delegation token. A file system that manages
-   * multiple file systems underneath, could return set of delegation tokens for
-   * all the file systems it manages.
+   * Obtain all delegation tokens used by this FileSystem that are not
+   * already present in the given Credentials.  Existing tokens will neither
+   * be verified as valid nor having the given renewer.  Missing tokens will
+   * be acquired and added to the given Credentials.
    * 
-   * @param renewer the account name that is allowed to renew the token.
+   * Default Impl: works for simple fs with its own token
+   * and also for an embedded fs whose tokens are those of its
+   * children file system (i.e. the embedded fs has not tokens of its
+   * own).
+   * 
+   * @param renewer the user allowed to renew the delegation tokens
+   * @param credentials cache in which to add new delegation tokens
    * @return list of new delegation tokens
    * @return list of new delegation tokens
    * @throws IOException
    * @throws IOException
    */
-  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
-    return new ArrayList<Token<?>>(0);
+  @InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
+  public Token<?>[] addDelegationTokens(
+      final String renewer, Credentials credentials) throws IOException {
+    if (credentials == null) {
+      credentials = new Credentials();
+    }
+    final List<Token<?>> tokens = new ArrayList<Token<?>>();
+    collectDelegationTokens(renewer, credentials, tokens);
+    return tokens.toArray(new Token<?>[tokens.size()]);
   }
   
   /**
-   * @see #getDelegationTokens(String)
-   * This is similar to getDelegationTokens, with the added restriction that if
-   * a token is already present in the passed Credentials object - that token
-   * is returned instead of a new delegation token. 
-   * 
-   * If the token is found to be cached in the Credentials object, this API does
-   * not verify the token validity or the passed in renewer. 
-   * 
-   * 
-   * @param renewer the account name that is allowed to renew the token.
-   * @param credentials a Credentials object containing already knowing 
-   *   delegationTokens.
-   * @return a list of delegation tokens.
+   * Recursively obtain the tokens for this FileSystem and all descended
+   * FileSystems as determined by getChildFileSystems().
+   * @param renewer the user allowed to renew the delegation tokens
+   * @param credentials cache in which to add the new delegation tokens
+   * @param tokens list in which to add acquired tokens
    * @throws IOException
    */
-  @InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
-  public List<Token<?>> getDelegationTokens(String renewer,
-      Credentials credentials) throws IOException {
-    List<Token<?>> allTokens = getDelegationTokens(renewer);
-    List<Token<?>> newTokens = new ArrayList<Token<?>>();
-    if (allTokens != null) {
-      for (Token<?> token : allTokens) {
-        Token<?> knownToken = credentials.getToken(token.getService());
-        if (knownToken == null) {
-          newTokens.add(token);
-        } else {
-          newTokens.add(knownToken);
+  private void collectDelegationTokens(final String renewer,
+                                       final Credentials credentials,
+                                       final List<Token<?>> tokens)
+                                           throws IOException {
+    final String serviceName = getCanonicalServiceName();
+    // Collect token of the this filesystem and then of its embedded children
+    if (serviceName != null) { // fs has token, grab it
+      final Text service = new Text(serviceName);
+      Token<?> token = credentials.getToken(service);
+      if (token == null) {
+        token = getDelegationToken(renewer);
+        if (token != null) {
+          tokens.add(token);
+          credentials.addToken(service, token);
         }
       }
     }
-    return newTokens;
+    // Now collect the tokens from the children
+    final FileSystem[] children = getChildFileSystems();
+    if (children != null) {
+      for (final FileSystem fs : children) {
+        fs.collectDelegationTokens(renewer, credentials, tokens);
+      }
+    }
   }
 
+  /**
+   * Get all the immediate child FileSystems embedded in this FileSystem.
+   * It does not recurse and get grand children.  If a FileSystem
+   * has multiple child FileSystems, then it should return a unique list
+   * of those FileSystems.  Default is to return null to signify no children.
+   * 
+   * @return FileSystems used by this FileSystem
+   */
+  @InterfaceAudience.LimitedPrivate({ "HDFS" })
+  @VisibleForTesting
+  public FileSystem[] getChildFileSystems() {
+    return null;
+  }
+  
   /** create a file with the provided permission
    * The permission of the file is set to be the provided permission as in
    * setPermission, not permission&~umask
@@ -616,12 +658,17 @@ public abstract class FileSystem extends Configured implements Closeable {
   @Deprecated
   public FsServerDefaults getServerDefaults() throws IOException {
     Configuration conf = getConf();
+    // CRC32 is chosen as default as it is available in all 
+    // releases that support checksum.
+    // The client trash configuration is ignored.
     return new FsServerDefaults(getDefaultBlockSize(), 
         conf.getInt("io.bytes.per.checksum", 512), 
         64 * 1024, 
         getDefaultReplication(),
         conf.getInt("io.file.buffer.size", 4096),
-        false);
+        false,
+        CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT,
+        DataChecksum.Type.CRC32);
   }
 
   /**
@@ -847,11 +894,40 @@ public abstract class FileSystem extends Configured implements Closeable {
       short replication,
       long blockSize,
       Progressable progress) throws IOException {
-    // only DFS support this
-    return create(f, permission, flags.contains(CreateFlag.OVERWRITE), bufferSize, replication, blockSize, progress);
+    return create(f, permission, flags, bufferSize, replication,
+        blockSize, progress, null);
   }
   
-  
+  /**
+   * Create an FSDataOutputStream at the indicated Path with a custom
+   * checksum option
+   * @param f the file name to open
+   * @param permission
+   * @param flags {@link CreateFlag}s to use for this stream.
+   * @param bufferSize the size of the buffer to be used.
+   * @param replication required block replication for the file.
+   * @param blockSize
+   * @param progress
+   * @param checksumOpt checksum parameter. If null, the values
+   *        found in conf will be used.
+   * @throws IOException
+   * @see #setPermission(Path, FsPermission)
+   */
+  public FSDataOutputStream create(Path f,
+      FsPermission permission,
+      EnumSet<CreateFlag> flags,
+      int bufferSize,
+      short replication,
+      long blockSize,
+      Progressable progress,
+      ChecksumOpt checksumOpt) throws IOException {
+    // Checksum options are ignored by default. The file systems that
+    // implement checksum need to override this method. The full
+    // support is currently only available in DFS.
+    return create(f, permission, flags.contains(CreateFlag.OVERWRITE), 
+        bufferSize, replication, blockSize, progress);
+  }
+
   /*.
    * This create has been added to support the FileContext that processes
    * the permission
@@ -863,7 +939,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   protected FSDataOutputStream primitiveCreate(Path f,
      FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
      short replication, long blockSize, Progressable progress,
-     int bytesPerChecksum) throws IOException {
+     ChecksumOpt checksumOpt) throws IOException {
 
     boolean pathExists = exists(f);
     CreateFlag.validate(f, pathExists, flag);
@@ -1543,7 +1619,11 @@ public abstract class FileSystem extends Configured implements Closeable {
       GlobFilter fp = new GlobFilter(components[components.length - 1], filter);
       if (fp.hasPattern()) { // last component has a pattern
         // list parent directories and then glob the results
-        results = listStatus(parentPaths, fp);
+        try {
+          results = listStatus(parentPaths, fp);
+        } catch (FileNotFoundException e) {
+          results = null;
+        }
         hasGlob[0] = true;
       } else { // last component does not have a pattern
         // remove the quoting of metachars in a non-regexp expansion
@@ -1592,7 +1672,11 @@ public abstract class FileSystem extends Configured implements Closeable {
     }
     GlobFilter fp = new GlobFilter(filePattern[level]);
     if (fp.hasPattern()) {
-      parents = FileUtil.stat2Paths(listStatus(parents, fp));
+      try {
+        parents = FileUtil.stat2Paths(listStatus(parents, fp));
+      } catch (FileNotFoundException e) {
+        parents = null;
+      }
       hasGlob[0] = true;
     } else { // the component does not have a pattern
       // remove the quoting of metachars in a non-regexp expansion

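The hunks above replace the old getDelegationTokens() variants with a single addDelegationTokens(renewer, credentials) entry point that recurses through getChildFileSystems(). A minimal caller-side sketch, not part of the patch; the renewer name "yarn" and the class name are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class TokenCollectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Credentials creds = new Credentials();
    // One call collects tokens for fs and, recursively, for every child
    // filesystem; tokens already cached in creds are not re-acquired.
    Token<?>[] acquired = fs.addDelegationTokens("yarn", creds);
    for (Token<?> token : acquired) {
      System.out.println("acquired token for " + token.getService());
    }
  }
}
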
+ 6 - 26
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -22,15 +22,12 @@ import java.io.*;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.EnumSet;
-import java.util.List;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.util.Progressable;
 
 /****************************************************************
@@ -414,10 +411,11 @@ public class FilterFileSystem extends FileSystem {
   @Override
   protected FSDataOutputStream primitiveCreate(Path f,
       FsPermission absolutePermission, EnumSet<CreateFlag> flag,
-      int bufferSize, short replication, long blockSize, Progressable progress, int bytesPerChecksum)
+      int bufferSize, short replication, long blockSize,
+      Progressable progress, ChecksumOpt checksumOpt)
       throws IOException {
     return fs.primitiveCreate(f, absolutePermission, flag,
-        bufferSize, replication, blockSize, progress, bytesPerChecksum);
+        bufferSize, replication, blockSize, progress, checksumOpt);
   }
 
   @Override
@@ -428,25 +426,7 @@ public class FilterFileSystem extends FileSystem {
   }
   
   @Override // FileSystem
-  public String getCanonicalServiceName() {
-    return fs.getCanonicalServiceName();
-  }
-  
-  @Override // FileSystem
-  @SuppressWarnings("deprecation")
-  public Token<?> getDelegationToken(String renewer) throws IOException {
-    return fs.getDelegationToken(renewer);
-  }
-  
-  @Override // FileSystem
-  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
-    return fs.getDelegationTokens(renewer);
-  }
-  
-  @Override
-  // FileSystem
-  public List<Token<?>> getDelegationTokens(String renewer,
-      Credentials credentials) throws IOException {
-    return fs.getDelegationTokens(renewer, credentials);
+  public FileSystem[] getChildFileSystems() {
+    return new FileSystem[]{fs};
   }
 }

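With the delegated token overrides gone, a filter-style wrapper only has to expose the wrapped filesystem as a child and the base-class logic shown earlier does the rest. A hypothetical subclass, invented purely for illustration:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;

public class AuditingFileSystem extends FilterFileSystem {
  public AuditingFileSystem(FileSystem wrapped) {
    super(wrapped);
  }
  // No getDelegationToken or getCanonicalServiceName overrides are needed:
  // FilterFileSystem already reports the wrapped fs via getChildFileSystems(),
  // so FileSystem#addDelegationTokens walks into it automatically.
}
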
+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
@@ -81,11 +82,11 @@ public abstract class FilterFs extends AbstractFileSystem {
   public FSDataOutputStream createInternal(Path f,
     EnumSet<CreateFlag> flag, FsPermission absolutePermission, int bufferSize,
     short replication, long blockSize, Progressable progress,
-    int bytesPerChecksum, boolean createParent) 
+    ChecksumOpt checksumOpt, boolean createParent) 
       throws IOException, UnresolvedLinkException {
     checkPath(f);
     return myFs.createInternal(f, flag, absolutePermission, bufferSize,
-        replication, blockSize, progress, bytesPerChecksum, createParent);
+        replication, blockSize, progress, checksumOpt, createParent);
   }
 
   @Override

+ 18 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java

@@ -26,6 +26,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.util.DataChecksum;
 
 /****************************************************
  * Provides server default configuration values to clients.
@@ -49,19 +51,24 @@ public class FsServerDefaults implements Writable {
   private short replication;
   private int fileBufferSize;
   private boolean encryptDataTransfer;
+  private long trashInterval;
+  private DataChecksum.Type checksumType;
 
   public FsServerDefaults() {
   }
 
   public FsServerDefaults(long blockSize, int bytesPerChecksum,
       int writePacketSize, short replication, int fileBufferSize,
-      boolean encryptDataTransfer) {
+      boolean encryptDataTransfer, long trashInterval,
+      DataChecksum.Type checksumType) {
     this.blockSize = blockSize;
     this.bytesPerChecksum = bytesPerChecksum;
     this.writePacketSize = writePacketSize;
     this.replication = replication;
     this.fileBufferSize = fileBufferSize;
     this.encryptDataTransfer = encryptDataTransfer;
+    this.trashInterval = trashInterval;
+    this.checksumType = checksumType;
   }
 
   public long getBlockSize() {
@@ -88,6 +95,14 @@ public class FsServerDefaults implements Writable {
     return encryptDataTransfer;
   }
 
+  public long getTrashInterval() {
+    return trashInterval;
+  }
+
+  public DataChecksum.Type getChecksumType() {
+    return checksumType;
+  }
+
   // /////////////////////////////////////////
   // Writable
   // /////////////////////////////////////////
@@ -98,6 +113,7 @@ public class FsServerDefaults implements Writable {
     out.writeInt(writePacketSize);
     out.writeShort(replication);
     out.writeInt(fileBufferSize);
+    WritableUtils.writeEnum(out, checksumType);
   }
 
   @InterfaceAudience.Private
@@ -107,5 +123,6 @@ public class FsServerDefaults implements Writable {
     writePacketSize = in.readInt();
     replication = in.readShort();
     fileBufferSize = in.readInt();
+    checksumType = WritableUtils.readEnum(in, DataChecksum.Type.class);
   }
 }

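A sketch of the widened constructor and the two new getters added above; the literal values are arbitrary examples, not defaults taken from this patch:

import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.util.DataChecksum;

public class ServerDefaultsSketch {
  public static void main(String[] args) {
    FsServerDefaults defaults = new FsServerDefaults(
        64 * 1024 * 1024,         // blockSize
        512,                      // bytesPerChecksum
        64 * 1024,                // writePacketSize
        (short) 3,                // replication
        4096,                     // fileBufferSize
        false,                    // encryptDataTransfer
        60,                       // trashInterval in minutes (0 = server trash disabled)
        DataChecksum.Type.CRC32); // checksumType
    System.out.println(defaults.getTrashInterval());  // 60
    System.out.println(defaults.getChecksumType());   // CRC32
  }
}
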
+ 41 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32CastagnoliFileChecksum.java

@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.util.DataChecksum;
+
+/** For CRC32 with the Castagnoli polynomial */
+public class MD5MD5CRC32CastagnoliFileChecksum extends MD5MD5CRC32FileChecksum {
+  /** Same as this(0, 0, null) */
+  public MD5MD5CRC32CastagnoliFileChecksum() {
+    this(0, 0, null);
+  }
+
+  /** Create a MD5FileChecksum */
+  public MD5MD5CRC32CastagnoliFileChecksum(int bytesPerCRC, long crcPerBlock, MD5Hash md5) {
+    super(bytesPerCRC, crcPerBlock, md5);
+  }
+
+  @Override
+  public DataChecksum.Type getCrcType() {
+    // default to the one that is understood by all releases.
+    return DataChecksum.Type.CRC32C;
+  }
+}

+ 58 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java

@@ -23,12 +23,17 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.util.DataChecksum;
 import org.xml.sax.Attributes;
 import org.xml.sax.SAXException;
 import org.znerd.xmlenc.XMLOutputter;
 
+import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
+import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
+
 /** MD5 of MD5 of CRC32. */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
@@ -54,7 +59,19 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
   
   /** {@inheritDoc} */ 
   public String getAlgorithmName() {
-    return "MD5-of-" + crcPerBlock + "MD5-of-" + bytesPerCRC + "CRC32";
+    return "MD5-of-" + crcPerBlock + "MD5-of-" + bytesPerCRC +
+        getCrcType().name();
+  }
+
+  public static DataChecksum.Type getCrcTypeFromAlgorithmName(String algorithm)
+      throws IOException {
+    if (algorithm.endsWith(DataChecksum.Type.CRC32.name())) {
+      return DataChecksum.Type.CRC32;
+    } else if (algorithm.endsWith(DataChecksum.Type.CRC32C.name())) {
+      return DataChecksum.Type.CRC32C;
+    }
+
+    throw new IOException("Unknown checksum type in " + algorithm);
   }
 
   /** {@inheritDoc} */ 
@@ -65,6 +82,16 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
     return WritableUtils.toByteArray(this);
   }
 
+  /** returns the CRC type */
+  public DataChecksum.Type getCrcType() {
+    // default to the one that is understood by all releases.
+    return DataChecksum.Type.CRC32;
+  }
+
+  public ChecksumOpt getChecksumOpt() {
+    return new ChecksumOpt(getCrcType(), bytesPerCRC);
+  }
+
   /** {@inheritDoc} */ 
   public void readFields(DataInput in) throws IOException {
     bytesPerCRC = in.readInt();
@@ -86,6 +113,7 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
     if (that != null) {
       xml.attribute("bytesPerCRC", "" + that.bytesPerCRC);
       xml.attribute("crcPerBlock", "" + that.crcPerBlock);
+      xml.attribute("crcType", ""+ that.getCrcType().name());
       xml.attribute("md5", "" + that.md5);
       xml.attribute("md5", "" + that.md5);
     }
     }
     xml.endTag();
     xml.endTag();
@@ -97,16 +125,40 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
     final String bytesPerCRC = attrs.getValue("bytesPerCRC");
     final String crcPerBlock = attrs.getValue("crcPerBlock");
     final String md5 = attrs.getValue("md5");
+    String crcType = attrs.getValue("crcType");
+    DataChecksum.Type finalCrcType;
     if (bytesPerCRC == null || crcPerBlock == null || md5 == null) {
       return null;
     }
 
     try {
-      return new MD5MD5CRC32FileChecksum(Integer.valueOf(bytesPerCRC),
-          Integer.valueOf(crcPerBlock), new MD5Hash(md5));
-    } catch(Exception e) {
+      // old versions don't support crcType.
+      if (crcType == null || crcType == "") {
+        finalCrcType = DataChecksum.Type.CRC32;
+      } else {
+        finalCrcType = DataChecksum.Type.valueOf(crcType);
+      }
+
+      switch (finalCrcType) {
+        case CRC32:
+          return new MD5MD5CRC32GzipFileChecksum(
+              Integer.valueOf(bytesPerCRC),
+              Integer.valueOf(crcPerBlock),
+              new MD5Hash(md5));
+        case CRC32C:
+          return new MD5MD5CRC32CastagnoliFileChecksum(
+              Integer.valueOf(bytesPerCRC),
+              Integer.valueOf(crcPerBlock),
+              new MD5Hash(md5));
+        default:
+          // we should never get here since finalCrcType will
+          // hold a valid type or we should have got an exception.
+          return null;
+      }
+    } catch (Exception e) {
       throw new SAXException("Invalid attributes: bytesPerCRC=" + bytesPerCRC
-          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5, e);
+          + ", crcPerBlock=" + crcPerBlock + ", crcType=" + crcType 
+          + ", md5=" + md5, e);
     }
   }
 
@@ -114,4 +166,4 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
   public String toString() {
     return getAlgorithmName() + ":" + md5;
   }
-}
+}

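A sketch of how the new subclasses and the algorithm-name helper fit together; the MD5 value is a dummy digest over an empty array, used only for illustration:

import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumNameSketch {
  public static void main(String[] args) throws Exception {
    MD5Hash md5 = MD5Hash.digest(new byte[0]); // dummy digest
    MD5MD5CRC32FileChecksum checksum =
        new MD5MD5CRC32GzipFileChecksum(512, 8, md5);
    // The CRC variant is now embedded in the algorithm name...
    String name = checksum.getAlgorithmName(); // "MD5-of-8MD5-of-512CRC32"
    // ...and the new static helper parses it back out.
    DataChecksum.Type type =
        MD5MD5CRC32FileChecksum.getCrcTypeFromAlgorithmName(name);
    System.out.println(name + " -> " + type);
  }
}
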
+ 40 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32GzipFileChecksum.java

@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.util.DataChecksum;
+
+/** For CRC32 with the Gzip polynomial */
+public class MD5MD5CRC32GzipFileChecksum extends MD5MD5CRC32FileChecksum {
+  /** Same as this(0, 0, null) */
+  public MD5MD5CRC32GzipFileChecksum() {
+    this(0, 0, null);
+  }
+
+  /** Create a MD5FileChecksum */
+  public MD5MD5CRC32GzipFileChecksum(int bytesPerCRC, long crcPerBlock, MD5Hash md5) {
+    super(bytesPerCRC, crcPerBlock, md5);
+  }
+  @Override
+  public DataChecksum.Type getCrcType() {
+    // default to the one that is understood by all releases.
+    return DataChecksum.Type.CRC32;
+  }
+}

+ 128 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java

@@ -20,7 +20,9 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 
 /**
  * This class contains options related to file system operations.
@@ -46,6 +48,10 @@ public final class Options {
     public static BytesPerChecksum bytesPerChecksum(short crc) {
       return new BytesPerChecksum(crc);
     }
+    public static ChecksumParam checksumParam(
+        ChecksumOpt csumOpt) {
+      return new ChecksumParam(csumOpt);
+    }
     public static Perms perms(FsPermission perm) {
       return new Perms(perm);
     }
@@ -91,7 +97,8 @@ public final class Options {
       }
       public int getValue() { return bufferSize; }
     }
-    
+
+    /** This is not needed if ChecksumParam is specified. **/
     public static class BytesPerChecksum extends CreateOpts {
       private final int bytesPerChecksum;
       protected BytesPerChecksum(short bpc) { 
@@ -103,6 +110,14 @@ public final class Options {
       }
       public int getValue() { return bytesPerChecksum; }
     }
+
+    public static class ChecksumParam extends CreateOpts {
+      private final ChecksumOpt checksumOpt;
+      protected ChecksumParam(ChecksumOpt csumOpt) {
+        checksumOpt = csumOpt;
+      }
+      public ChecksumOpt getValue() { return checksumOpt; }
+    }
     
     public static class Perms extends CreateOpts {
       private final FsPermission permissions;
@@ -206,4 +221,116 @@ public final class Options {
       return code;
     }
   }
+
+  /**
+   * This is used in FileSystem and FileContext to specify checksum options.
+   */
+  public static class ChecksumOpt {
+    private final int crcBlockSize;
+    private final DataChecksum.Type crcType;
+
+    /**
+     * Create a uninitialized one
+     */
+    public ChecksumOpt() {
+      crcBlockSize = -1;
+      crcType = DataChecksum.Type.DEFAULT;
+    }
+
+    /**
+     * Normal ctor
+     * @param type checksum type
+     * @param size bytes per checksum
+     */
+    public ChecksumOpt(DataChecksum.Type type, int size) {
+      crcBlockSize = size;
+      crcType = type;
+    }
+
+    public int getBytesPerChecksum() {
+      return crcBlockSize;
+    }
+
+    public DataChecksum.Type getChecksumType() {
+      return crcType;
+    }
+
+    /**
+     * Create a ChecksumOpts that disables checksum
+     */
+    public static ChecksumOpt createDisabled() {
+      return new ChecksumOpt(DataChecksum.Type.NULL, -1);
+    }
+
+    /**
+     * A helper method for processing user input and default value to 
+     * create a combined checksum option. This is a bit complicated because
+     * bytesPerChecksum is kept for backward compatibility.
+     *
+     * @param defaultOpt Default checksum option
+     * @param userOpt User-specified checksum option. Ignored if null.
+     * @param userBytesPerChecksum User-specified bytesPerChecksum
+     *                Ignored if < 0.
+     */
+    public static ChecksumOpt processChecksumOpt(ChecksumOpt defaultOpt, 
+        ChecksumOpt userOpt, int userBytesPerChecksum) {
+      // The following is done to avoid unnecessary creation of new objects.
+      // tri-state variable: 0 default, 1 userBytesPerChecksum, 2 userOpt
+      short whichSize;
+      // true default, false userOpt
+      boolean useDefaultType;
+      
+      //  bytesPerChecksum - order of preference
+      //    user specified value in bytesPerChecksum
+      //    user specified value in checksumOpt
+      //    default.
+      if (userBytesPerChecksum > 0) {
+        whichSize = 1; // userBytesPerChecksum
+      } else if (userOpt != null && userOpt.getBytesPerChecksum() > 0) {
+        whichSize = 2; // userOpt
+      } else {
+        whichSize = 0; // default
+      }
+
+      // checksum type - order of preference
+      //   user specified value in checksumOpt
+      //   default.
+      if (userOpt != null &&
+            userOpt.getChecksumType() != DataChecksum.Type.DEFAULT) {
+        useDefaultType = false;
+      } else {
+        useDefaultType = true;
+      }
+
+      // Short out the common and easy cases
+      if (whichSize == 0 && useDefaultType) {
+        return defaultOpt;
+      } else if (whichSize == 2 && !useDefaultType) {
+        return userOpt;
+      }
+
+      // Take care of the rest of combinations
+      DataChecksum.Type type = useDefaultType ? defaultOpt.getChecksumType() :
+          userOpt.getChecksumType();
+      if (whichSize == 0) {
+        return new ChecksumOpt(type, defaultOpt.getBytesPerChecksum());
+      } else if (whichSize == 1) {
+        return new ChecksumOpt(type, userBytesPerChecksum);
+      } else {
+        return new ChecksumOpt(type, userOpt.getBytesPerChecksum());
+      }
+    }
+
+    /**
+     * A helper method for processing user input and default value to 
+     * create a combined checksum option. 
+     *
+     * @param defaultOpt Default checksum option
+     * @param userOpt User-specified checksum option
+     */
+    public static ChecksumOpt processChecksumOpt(ChecksumOpt defaultOpt,
+        ChecksumOpt userOpt) {
+      return processChecksumOpt(defaultOpt, userOpt, -1);
+    }
+  }
 }

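A sketch of the precedence that processChecksumOpt implements: an explicit userBytesPerChecksum beats the size carried inside the user option, and a user type of Type.DEFAULT falls back to the server-side type. The values below are arbitrary:

import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumOptSketch {
  public static void main(String[] args) {
    ChecksumOpt serverDefault = new ChecksumOpt(DataChecksum.Type.CRC32, 512);
    ChecksumOpt userOpt = new ChecksumOpt(DataChecksum.Type.CRC32C, 1024);

    // Explicit bytesPerChecksum (256) wins over userOpt's 1024; the type
    // comes from userOpt because it is not Type.DEFAULT.
    ChecksumOpt combined =
        ChecksumOpt.processChecksumOpt(serverDefault, userOpt, 256);
    System.out.println(combined.getChecksumType());     // CRC32C
    System.out.println(combined.getBytesPerChecksum()); // 256

    // With no user input at all, the server defaults are returned unchanged.
    ChecksumOpt fallback = ChecksumOpt.processChecksumOpt(serverDefault, null);
    System.out.println(fallback == serverDefault);      // true
  }
}
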
+ 0 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java

@@ -117,9 +117,4 @@ public class Trash extends Configured {
   public Runnable getEmptier() throws IOException {
     return trashPolicy.getEmptier();
   }
-
-  /** Run an emptier.*/
-  public static void main(String[] args) throws Exception {
-    new Trash(new Configuration()).getEmptier().run();
-  }
 }

+ 31 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java

@@ -34,7 +34,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -66,6 +65,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 
   private Path current;
   private Path homesParent;
+  private long emptierInterval;
 
   public TrashPolicyDefault() { }
 
@@ -79,8 +79,27 @@ public class TrashPolicyDefault extends TrashPolicy {
     this.trash = new Path(home, TRASH);
     this.homesParent = home.getParent();
     this.current = new Path(trash, CURRENT);
-    this.deletionInterval = (long) (conf.getFloat(FS_TRASH_INTERVAL_KEY,
-                                    FS_TRASH_INTERVAL_DEFAULT) *  MSECS_PER_MINUTE);
+    long trashInterval = 0;
+    try {
+      trashInterval = fs.getServerDefaults(home).getTrashInterval();
+    } catch (IOException ioe) {
+      LOG.warn("Unable to get server defaults", ioe);
+    }
+    // If the trash interval is not configured or is disabled on the
+    // server side then check the config which may be client side.
+    if (0 == trashInterval) {
+      this.deletionInterval = (long)(conf.getFloat(
+          FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT)
+          * MSECS_PER_MINUTE);
+    } else {
+      this.deletionInterval = trashInterval * MSECS_PER_MINUTE;
+    }
+    // For the checkpoint interval use the given config instead of
+    // checking the server as it's OK if a client starts an emptier
+    // with a different interval than the server.
+    this.emptierInterval = (long)(conf.getFloat(
+        FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
+        * MSECS_PER_MINUTE);
   }
   
   private Path makeTrashRelativePath(Path basePath, Path rmFilePath) {
@@ -89,7 +108,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 
   @Override
   public boolean isEnabled() {
-    return (deletionInterval != 0);
+    return deletionInterval != 0;
   }
 
   @Override
@@ -223,7 +242,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 
   @Override
   public Runnable getEmptier() throws IOException {
-    return new Emptier(getConf());
+    return new Emptier(getConf(), emptierInterval);
   }
 
   private class Emptier implements Runnable {
@@ -231,16 +250,14 @@ public class TrashPolicyDefault extends TrashPolicy {
     private Configuration conf;
     private long emptierInterval;
 
-    Emptier(Configuration conf) throws IOException {
+    Emptier(Configuration conf, long emptierInterval) throws IOException {
       this.conf = conf;
-      this.emptierInterval = (long) (conf.getFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY,
-                                     FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT) *
-                                     MSECS_PER_MINUTE);
-      if (this.emptierInterval > deletionInterval ||
-          this.emptierInterval == 0) {
-        LOG.warn("The configured interval for checkpoint is " +
-                 this.emptierInterval + " minutes." +
-                 " Using interval of " + deletionInterval +
+      this.emptierInterval = emptierInterval;
+      if (emptierInterval > deletionInterval || emptierInterval == 0) {
+        LOG.info("The configured checkpoint interval is " +
+                 (emptierInterval / MSECS_PER_MINUTE) + " minutes." +
+                 " Using an interval of " +
+                 (deletionInterval / MSECS_PER_MINUTE) +
                  " minutes that is used for deletion instead");
                  " minutes that is used for deletion instead");
         this.emptierInterval = deletionInterval;
         this.emptierInterval = deletionInterval;
       }
       }

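A sketch of the interval resolution that the constructor above now performs; the filesystem, home path, and configuration are assumed to exist, and the key names are the standard fs.trash.* keys:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TrashIntervalSketch {
  static final long MSECS_PER_MINUTE = 60 * 1000L;

  static long deletionIntervalMillis(FileSystem fs, Path home, Configuration conf) {
    long serverMinutes = 0;
    try {
      // The server-side trash interval wins when it is configured (non-zero)...
      serverMinutes = fs.getServerDefaults(home).getTrashInterval();
    } catch (IOException ioe) {
      // ...and the client-side key is the fallback if the server cannot say.
    }
    return serverMinutes != 0
        ? serverMinutes * MSECS_PER_MINUTE
        : (long) (conf.getFloat("fs.trash.interval", 0) * MSECS_PER_MINUTE);
  }
}
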
+ 12 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FtpConfigKeys.java

@@ -23,10 +23,16 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.util.DataChecksum;
 
 /** 
  * This class contains constants for configuration keys used
  * in the ftp file system.
+ *
+ * Note that the settings for unimplemented features are ignored. 
+ * E.g. checksum related settings are just place holders. Even when
+ * wrapped with {@link ChecksumFileSystem}, these settings are not
+ * used. 
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -45,6 +51,9 @@ public class FtpConfigKeys extends CommonConfigurationKeys {
                                                 "ftp.client-write-packet-size";
                                                 "ftp.client-write-packet-size";
   public static final int     CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
   public static final int     CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
   public static final boolean ENCRYPT_DATA_TRANSFER_DEFAULT = false;
   public static final boolean ENCRYPT_DATA_TRANSFER_DEFAULT = false;
+  public static final long    FS_TRASH_INTERVAL_DEFAULT = 0;
+  public static final DataChecksum.Type CHECKSUM_TYPE_DEFAULT =
+      DataChecksum.Type.CRC32;
   
   protected static FsServerDefaults getServerDefaults() throws IOException {
     return new FsServerDefaults(
@@ -53,7 +62,9 @@ public class FtpConfigKeys extends CommonConfigurationKeys {
         CLIENT_WRITE_PACKET_SIZE_DEFAULT,
         REPLICATION_DEFAULT,
         STREAM_BUFFER_SIZE_DEFAULT,
-        ENCRYPT_DATA_TRANSFER_DEFAULT);
+        ENCRYPT_DATA_TRANSFER_DEFAULT,
+        FS_TRASH_INTERVAL_DEFAULT,
+        CHECKSUM_TYPE_DEFAULT);
   }
 }
   

+ 13 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/LocalConfigKeys.java

@@ -24,11 +24,18 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.util.DataChecksum;
 
 /** 
  * This class contains constants for configuration keys used
  * in the local file system, raw local fs and checksum fs.
+ *
+ * Note that the settings for unimplemented features are ignored. 
+ * E.g. checksum related settings are just place holders. Even when
+ * wrapped with {@link ChecksumFileSystem}, these settings are not
+ * used.
  */
+
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class LocalConfigKeys extends CommonConfigurationKeys {
@@ -44,7 +51,9 @@ public class LocalConfigKeys extends CommonConfigurationKeys {
                                                 "file.client-write-packet-size";
                                                 "file.client-write-packet-size";
   public static final int CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
   public static final int CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
   public static final boolean ENCRYPT_DATA_TRANSFER_DEFAULT = false;
   public static final boolean ENCRYPT_DATA_TRANSFER_DEFAULT = false;
-
+  public static final long FS_TRASH_INTERVAL_DEFAULT = 0;
+  public static final DataChecksum.Type CHECKSUM_TYPE_DEFAULT =
+      DataChecksum.Type.CRC32;
   public static FsServerDefaults getServerDefaults() throws IOException {
     return new FsServerDefaults(
         BLOCK_SIZE_DEFAULT,
@@ -52,7 +61,9 @@ public class LocalConfigKeys extends CommonConfigurationKeys {
         CLIENT_WRITE_PACKET_SIZE_DEFAULT,
         REPLICATION_DEFAULT,
         STREAM_BUFFER_SIZE_DEFAULT,
-        ENCRYPT_DATA_TRANSFER_DEFAULT);
+        ENCRYPT_DATA_TRANSFER_DEFAULT,
+        FS_TRASH_INTERVAL_DEFAULT,
+        CHECKSUM_TYPE_DEFAULT);
   }
 }
   

+ 9 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java

@@ -44,12 +44,12 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceStability.Evolving
 
 abstract public class Command extends Configured {
-  /** default name of the command */
-  public static String NAME;
-  /** the command's usage switches and arguments format */
-  public static String USAGE;
-  /** the command's long description */
-  public static String DESCRIPTION;
+  /** field name indicating the default name of the command */
+  public static final String COMMAND_NAME_FIELD = "NAME";
+  /** field name indicating the command's usage switches and arguments format */
+  public static final String COMMAND_USAGE_FIELD = "USAGE";
+  /** field name indicating the command's long description */
+  public static final String COMMAND_DESCRIPTION_FIELD = "DESCRIPTION";
     
   protected String[] args;
   protected String name;
@@ -397,7 +397,7 @@ abstract public class Command extends Configured {
    */
   public String getName() {
     return (name == null)
-      ? getCommandField("NAME")
+      ? getCommandField(COMMAND_NAME_FIELD)
       : name.startsWith("-") ? name.substring(1) : name;
       : name.startsWith("-") ? name.substring(1) : name;
   }
   }
 
 
@@ -415,7 +415,7 @@ abstract public class Command extends Configured {
    */
   public String getUsage() {
     String cmd = "-" + getName();
-    String usage = isDeprecated() ? "" : getCommandField("USAGE");
+    String usage = isDeprecated() ? "" : getCommandField(COMMAND_USAGE_FIELD);
     return usage.isEmpty() ? cmd : cmd + " " + usage; 
   }
 
@@ -426,7 +426,7 @@ abstract public class Command extends Configured {
   public String getDescription() {
     return isDeprecated()
       ? "(DEPRECATED) Same as '" + getReplacementCommand() + "'"
-      : getCommandField("DESCRIPTION");
+      : getCommandField(COMMAND_DESCRIPTION_FIELD);
   }
 
   /**

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -159,11 +160,11 @@ class ChRootedFs extends AbstractFileSystem {
   public FSDataOutputStream createInternal(final Path f,
       final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
       final int bufferSize, final short replication, final long blockSize,
-      final Progressable progress, final int bytesPerChecksum,
+      final Progressable progress, final ChecksumOpt checksumOpt,
       final boolean createParent) throws IOException, UnresolvedLinkException {
     return myFs.createInternal(fullPath(f), flag,
         absolutePermission, bufferSize,
-        replication, blockSize, progress, bytesPerChecksum, createParent);
+        replication, blockSize, progress, checksumOpt, createParent);
   }
 
   @Override

+ 13 - 62
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -23,7 +23,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -49,11 +49,8 @@ import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.viewfs.InodeTree.INode;
 import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
 
@@ -235,11 +232,6 @@ public class ViewFileSystem extends FileSystem {
     return res.isInternalDir() ? null : res.targetFileSystem.getHomeDirectory();
   }
   
-  @Override
-  public String getCanonicalServiceName() {
-    return null;
-  }
-
   @Override
   public URI getUri() {
     return myUri;
@@ -549,6 +541,18 @@ public class ViewFileSystem extends FileSystem {
     }
   }
 
+  @Override
+  public FileSystem[] getChildFileSystems() {
+    List<InodeTree.MountPoint<FileSystem>> mountPoints =
+        fsState.getMountPoints();
+    Set<FileSystem> children = new HashSet<FileSystem>();
+    for (InodeTree.MountPoint<FileSystem> mountPoint : mountPoints) {
+      FileSystem targetFs = mountPoint.target.targetFileSystem;
+      children.addAll(Arrays.asList(targetFs.getChildFileSystems()));
+    }
+    return children.toArray(new FileSystem[]{});
+  }
+  
   public MountPoint[] getMountPoints() {
     List<InodeTree.MountPoint<FileSystem>> mountPoints = 
                   fsState.getMountPoints();
@@ -561,59 +565,6 @@ public class ViewFileSystem extends FileSystem {
     return result;
   }
   
- 
-  @Override
-  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
-    List<InodeTree.MountPoint<FileSystem>> mountPoints = 
-                fsState.getMountPoints();
-    int initialListSize  = 0;
-    for (InodeTree.MountPoint<FileSystem> im : mountPoints) {
-      initialListSize += im.target.targetDirLinkList.length; 
-    }
-    List<Token<?>> result = new ArrayList<Token<?>>(initialListSize);
-    for ( int i = 0; i < mountPoints.size(); ++i ) {
-      List<Token<?>> tokens = 
-        mountPoints.get(i).target.targetFileSystem.getDelegationTokens(renewer);
-      if (tokens != null) {
-        result.addAll(tokens);
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public List<Token<?>> getDelegationTokens(String renewer,
-      Credentials credentials) throws IOException {
-    List<InodeTree.MountPoint<FileSystem>> mountPoints =
-        fsState.getMountPoints();
-    int initialListSize = 0;
-    for (InodeTree.MountPoint<FileSystem> im : mountPoints) {
-      initialListSize += im.target.targetDirLinkList.length;
-    }
-    Set<String> seenServiceNames = new HashSet<String>();
-    List<Token<?>> result = new ArrayList<Token<?>>(initialListSize);
-    for (int i = 0; i < mountPoints.size(); ++i) {
-      String serviceName =
-          mountPoints.get(i).target.targetFileSystem.getCanonicalServiceName();
-      if (serviceName == null || seenServiceNames.contains(serviceName)) {
-        continue;
-      }
-      seenServiceNames.add(serviceName);
-      Token<?> knownToken = credentials.getToken(new Text(serviceName));
-      if (knownToken != null) {
-        result.add(knownToken);
-      } else {
-        List<Token<?>> tokens =
-            mountPoints.get(i).target.targetFileSystem
-                .getDelegationTokens(renewer);
-        if (tokens != null) {
-          result.addAll(tokens);
-        }
-      }
-    }
-    return result;
-  }
-
   /*
    * An instance of this class represents an internal dir of the viewFs 
    * that is internal dir of the mount table.

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
@@ -265,7 +266,7 @@ public class ViewFs extends AbstractFileSystem {
   public FSDataOutputStream createInternal(final Path f,
       final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
       final int bufferSize, final short replication, final long blockSize,
-      final Progressable progress, final int bytesPerChecksum,
+      final Progressable progress, final ChecksumOpt checksumOpt,
       final boolean createParent) throws AccessControlException,
       FileAlreadyExistsException, FileNotFoundException,
       ParentNotDirectoryException, UnsupportedFileSystemException,
@@ -283,7 +284,7 @@ public class ViewFs extends AbstractFileSystem {
     assert(res.remainingPath != null);
     return res.targetFileSystem.createInternal(res.remainingPath, flag,
         absolutePermission, bufferSize, replication,
-        blockSize, progress, bytesPerChecksum,
+        blockSize, progress, checksumOpt,
         createParent);
   }
 
@@ -632,7 +633,7 @@ public class ViewFs extends AbstractFileSystem {
     public FSDataOutputStream createInternal(final Path f,
         final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
         final int bufferSize, final short replication, final long blockSize,
-        final Progressable progress, final int bytesPerChecksum,
+        final Progressable progress, final ChecksumOpt checksumOpt,
         final boolean createParent) throws AccessControlException,
         FileAlreadyExistsException, FileNotFoundException,
         ParentNotDirectoryException, UnsupportedFileSystemException,
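The bare bytesPerChecksum argument is folded into Options.ChecksumOpt in this change. A hedged sketch of constructing one, assuming the (DataChecksum.Type, int) constructor exposed by the new ChecksumOpt class:

import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumOptSketch {
  // Illustrative only: CRC32C checksums computed over 512-byte chunks.
  public static ChecksumOpt crc32cEvery512Bytes() {
    return new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
  }
}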

+ 19 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java

@@ -49,16 +49,34 @@ public class FailoverController {
   private final int rpcTimeoutToNewActive;
   
   private final Configuration conf;
+  /*
+   * Need a copy of conf for graceful fence to set 
+   * configurable retries for IPC client.
+   * Refer HDFS-3561
+   */
+  private final Configuration gracefulFenceConf;
 
 
   private final RequestSource requestSource;
   
   public FailoverController(Configuration conf,
       RequestSource source) {
     this.conf = conf;
+    this.gracefulFenceConf = new Configuration(conf);
     this.requestSource = source;
     
     this.gracefulFenceTimeout = getGracefulFenceTimeout(conf);
     this.rpcTimeoutToNewActive = getRpcTimeoutToNewActive(conf);
+    
+    //Configure less retries for graceful fence 
+    int gracefulFenceConnectRetries = conf.getInt(
+        CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_CONNECTION_RETRIES,
+        CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_CONNECTION_RETRIES_DEFAULT);
+    gracefulFenceConf.setInt(
+        CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
+        gracefulFenceConnectRetries);
+    gracefulFenceConf.setInt(
+        CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
+        gracefulFenceConnectRetries);
   }
 
   static int getGracefulFenceTimeout(Configuration conf) {
@@ -150,7 +168,7 @@ public class FailoverController {
   boolean tryGracefulFence(HAServiceTarget svc) {
     HAServiceProtocol proxy = null;
     try {
-      proxy = svc.getProxy(conf, gracefulFenceTimeout);
+      proxy = svc.getProxy(gracefulFenceConf, gracefulFenceTimeout);
       proxy.transitionToStandby(createReqInfo());
       return true;
     } catch (ServiceFailedException sfe) {
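The gracefulFenceConf copy exists so fencing a dead node fails fast instead of burning the default IPC retry budget. A minimal sketch of the same pattern (illustrative only, reusing the configuration keys referenced above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class LowRetryConf {
  public static Configuration lowRetryCopy(Configuration conf, int retries) {
    Configuration copy = new Configuration(conf); // leave the caller's conf untouched
    copy.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, retries);
    copy.setInt(
        CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
        retries);
    return copy;
  }
}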

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -330,6 +330,12 @@ public class HttpServer implements FilterContainer {
       Context logContext = new Context(parent, "/logs");
       logContext.setResourceBase(logDir);
       logContext.addServlet(AdminAuthorizedServlet.class, "/*");
+      if (conf.getBoolean(
+          CommonConfigurationKeys.HADOOP_JETTY_LOGS_SERVE_ALIASES,
+          CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
+        logContext.getInitParams().put(
+            "org.mortbay.jetty.servlet.Default.aliases", "true");
+      }
       logContext.setDisplayName("logs");
       setContextAttributes(logContext, conf);
       defaultContexts.put(logContext, true);

+ 13 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java

@@ -206,12 +206,20 @@ public class IOUtils {
    * for any reason (including EOF)
    */
   public static void skipFully(InputStream in, long len) throws IOException {
-    while (len > 0) {
-      long ret = in.skip(len);
-      if (ret < 0) {
-        throw new IOException( "Premature EOF from inputStream");
+    long amt = len;
+    while (amt > 0) {
+      long ret = in.skip(amt);
+      if (ret == 0) {
+        // skip may return 0 even if we're not at EOF.  Luckily, we can 
+        // use the read() method to figure out if we're at the end.
+        int b = in.read();
+        if (b == -1) {
+          throw new EOFException( "Premature EOF from inputStream after " +
+              "skipping " + (len - amt) + " byte(s).");
+        }
+        ret = 1;
       }
-      len -= ret;
+      amt -= ret;
     }
   }
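A short usage sketch of the revised contract (illustrative): skipFully() now probes with read() when skip() returns 0, and reports EOFException only at a true end of stream.

import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import org.apache.hadoop.io.IOUtils;

public class SkipFullyDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayInputStream in = new ByteArrayInputStream(new byte[16]);
    IOUtils.skipFully(in, 10);   // fine: 6 bytes remain afterwards
    try {
      IOUtils.skipFully(in, 10); // only 6 bytes left in the stream
    } catch (EOFException expected) {
      System.out.println("premature EOF reported, as expected");
    }
  }
}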
   
   

+ 42 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -46,11 +46,13 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.LinkedBlockingQueue;
@@ -108,6 +110,42 @@ import com.google.common.annotations.VisibleForTesting;
 public abstract class Server {
   private final boolean authorize;
   private boolean isSecurityEnabled;
+  private ExceptionsHandler exceptionsHandler = new ExceptionsHandler();
+  
+  public void addTerseExceptions(Class<?>... exceptionClass) {
+    exceptionsHandler.addTerseExceptions(exceptionClass);
+  }
+
+  /**
+   * ExceptionsHandler manages Exception groups for special handling
+   * e.g., terse exception group for concise logging messages
+   */
+  static class ExceptionsHandler {
+    private volatile Set<String> terseExceptions = new HashSet<String>();
+
+    /**
+     * Add exception class so server won't log its stack trace.
+     * Modifying the terseException through this method is thread safe.
+     *
+     * @param exceptionClass exception classes 
+     */
+    void addTerseExceptions(Class<?>... exceptionClass) {
+
+      // Make a copy of terseException for performing modification
+      final HashSet<String> newSet = new HashSet<String>(terseExceptions);
+
+      // Add all class names into the HashSet
+      for (Class<?> name : exceptionClass) {
+        newSet.add(name.toString());
+      }
+      // Replace terseException set
+      terseExceptions = Collections.unmodifiableSet(newSet);
+    }
+
+    boolean isTerse(Class<?> t) {
+      return terseExceptions.contains(t.toString());
+    }
+  }
   
   
   /**
    * The first four bytes of Hadoop RPC connections
@@ -1704,8 +1742,8 @@ public abstract class Server {
               // on the server side, as opposed to just a normal exceptional
               // result.
               LOG.warn(logMsg, e);
-            } else if (e instanceof StandbyException) {
-              // Don't log the whole stack trace of these exceptions.
+            } else if (exceptionsHandler.isTerse(e.getClass())) {
+             // Don't log the whole stack trace of these exceptions.
               // Way too noisy!
               LOG.info(logMsg);
             } else {
@@ -1844,6 +1882,8 @@ public abstract class Server {
     if (isSecurityEnabled) {
       SaslRpcServer.init(conf);
     }
+    
+    this.exceptionsHandler.addTerseExceptions(StandbyException.class);
   }
 
   private void closeConnection(Connection connection) {
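With the handler in place, StandbyException is simply the first registered terse exception; any RPC server can add its own. A hypothetical sketch:

import java.io.FileNotFoundException;
import org.apache.hadoop.ipc.Server;

public class TerseExceptionSetup {
  public static void configure(Server rpcServer) {
    // Log these as one-line messages instead of full stack traces.
    rpcServer.addTerseExceptions(FileNotFoundException.class);
  }
}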

+ 20 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java

@@ -56,6 +56,20 @@ public class Credentials implements Writable {
   private  Map<Text, Token<? extends TokenIdentifier>> tokenMap = 
     new HashMap<Text, Token<? extends TokenIdentifier>>(); 
 
+  /**
+   * Create an empty credentials instance
+   */
+  public Credentials() {
+  }
+  
+  /**
+   * Create a copy of the given credentials
+   * @param credentials to copy
+   */
+  public Credentials(Credentials credentials) {
+    this.addAll(credentials);
+  }
+  
   /**
    * Returns the key bytes for the alias
    * @param alias the alias for the key
@@ -260,4 +274,10 @@ public class Credentials implements Writable {
       }
     }
   }
+  
+  public void addTokensToUGI(UserGroupInformation ugi) {
+    for (Map.Entry<Text, Token<?>> token: tokenMap.entrySet()) {
+      ugi.addToken(token.getKey(), token.getValue());
+    }
+  }
 }
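A brief usage sketch of the new copy constructor and helper (illustrative only):

import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class CredentialsCopyDemo {
  public static void attachCopy(Credentials original) throws Exception {
    Credentials copy = new Credentials(original);   // defensive copy via addAll()
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    copy.addTokensToUGI(ugi);                       // register each token under its alias
  }
}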

+ 63 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java

@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.NativeCodeLoader;
+
+public class JniBasedUnixGroupsMappingWithFallback implements
+    GroupMappingServiceProvider {
+
+  private static final Log LOG = LogFactory
+      .getLog(JniBasedUnixGroupsMappingWithFallback.class);
+  
+  private GroupMappingServiceProvider impl;
+
+  public JniBasedUnixGroupsMappingWithFallback() {
+    if (NativeCodeLoader.isNativeCodeLoaded()) {
+      this.impl = new JniBasedUnixGroupsMapping();
+    } else {
+      LOG.info("Falling back to shell based");
+      this.impl = new ShellBasedUnixGroupsMapping();
+    }
+    if (LOG.isDebugEnabled()){
+      LOG.debug("Group mapping impl=" + impl.getClass().getName());
+    }
+  }
+
+  @Override
+  public List<String> getGroups(String user) throws IOException {
+    return impl.getGroups(user);
+  }
+
+  @Override
+  public void cacheGroupsRefresh() throws IOException {
+    impl.cacheGroupsRefresh();
+  }
+
+  @Override
+  public void cacheGroupsAdd(List<String> groups) throws IOException {
+    impl.cacheGroupsAdd(groups);
+  }
+
+}
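To actually pick up the fallback implementation, the group mapping provider is selected through configuration. A hypothetical snippet in Java form (the hadoop.security.group.mapping key name is an assumption here, not part of this diff):

import org.apache.hadoop.conf.Configuration;

public class GroupMappingSelection {
  public static Configuration useJniWithFallback(Configuration conf) {
    conf.set("hadoop.security.group.mapping",
        "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback");
    return conf;
  }
}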

+ 63 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java

@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.NativeCodeLoader;
+
+public class JniBasedUnixGroupsNetgroupMappingWithFallback implements
+    GroupMappingServiceProvider {
+
+  private static final Log LOG = LogFactory
+      .getLog(JniBasedUnixGroupsNetgroupMappingWithFallback.class);
+
+  private GroupMappingServiceProvider impl;
+
+  public JniBasedUnixGroupsNetgroupMappingWithFallback() {
+    if (NativeCodeLoader.isNativeCodeLoaded()) {
+      this.impl = new JniBasedUnixGroupsNetgroupMapping();
+    } else {
+      LOG.info("Falling back to shell based");
+      this.impl = new ShellBasedUnixGroupsNetgroupMapping();
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Group mapping impl=" + impl.getClass().getName());
+    }
+  }
+
+  @Override
+  public List<String> getGroups(String user) throws IOException {
+    return impl.getGroups(user);
+  }
+
+  @Override
+  public void cacheGroupsRefresh() throws IOException {
+    impl.cacheGroupsRefresh();
+  }
+
+  @Override
+  public void cacheGroupsAdd(List<String> groups) throws IOException {
+    impl.cacheGroupsAdd(groups);
+  }
+
+}

+ 70 - 12
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -641,14 +642,12 @@ public class UserGroupInformation {
                                           AuthenticationMethod.SIMPLE);
         loginUser = new UserGroupInformation(login.getSubject());
         String fileLocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
-        if (fileLocation != null && isSecurityEnabled()) {
+        if (fileLocation != null) {
           // load the token storage file and put all of the tokens into the
           // user.
           Credentials cred = Credentials.readTokenStorageFile(
               new Path("file:///" + fileLocation), conf);
-          for (Token<?> token: cred.getAllTokens()) {
-            loginUser.addToken(token);
-          }
+          cred.addTokensToUGI(loginUser);
         }
         loginUser.spawnAutoRenewalThreadForUserCreds();
       } catch (LoginException le) {
@@ -1177,6 +1176,41 @@ public class UserGroupInformation {
   public synchronized Set<TokenIdentifier> getTokenIdentifiers() {
     return subject.getPublicCredentials(TokenIdentifier.class);
   }
+
+  // wrapper to retain the creds key for the token
+  private class NamedToken {
+    Text alias;
+    Token<? extends TokenIdentifier> token;
+    NamedToken(Text alias, Token<? extends TokenIdentifier> token) {
+      this.alias = alias;
+      this.token = token;
+    }
+    @Override
+    public boolean equals(Object o) {
+      boolean equals;
+      if (o == this) {
+        equals = true;
+      } else if (!(o instanceof NamedToken)) {
+        equals = false;
+      } else {
+        Text otherAlias = ((NamedToken)o).alias;
+        if (alias == otherAlias) {
+          equals = true;
+        } else {
+          equals = (otherAlias != null && otherAlias.equals(alias));
+        }
+      }
+      return equals;
+    }
+    @Override
+    public int hashCode() {
+      return (alias != null) ? alias.hashCode() : -1; 
+    }
+    @Override
+    public String toString() {
+      return "NamedToken: alias="+alias+" token="+token;
+    }
+  }
   
   
   /**
    * Add a token to this UGI
@@ -1185,7 +1219,22 @@ public class UserGroupInformation {
    * @return true on successful add of new token
    */
   public synchronized boolean addToken(Token<? extends TokenIdentifier> token) {
-    return subject.getPrivateCredentials().add(token);
+    return addToken(token.getService(), token);
+  }
+
+  /**
+   * Add a named token to this UGI
+   * 
+   * @param alias Name of the token
+   * @param token Token to be added
+   * @return true on successful add of new token
+   */
+  public synchronized boolean addToken(Text alias,
+                                       Token<? extends TokenIdentifier> token) {
+    NamedToken namedToken = new NamedToken(alias, token);
+    Collection<Object> ugiCreds = subject.getPrivateCredentials();
+    ugiCreds.remove(namedToken); // allow token to be replaced
+    return ugiCreds.add(new NamedToken(alias, token));
   }
   
   /**
@@ -1195,14 +1244,23 @@ public class UserGroupInformation {
    */
   public synchronized
   Collection<Token<? extends TokenIdentifier>> getTokens() {
-    Set<Object> creds = subject.getPrivateCredentials();
-    List<Token<?>> result = new ArrayList<Token<?>>(creds.size());
-    for(Object o: creds) {
-      if (o instanceof Token<?>) {
-        result.add((Token<?>) o);
-      }
+    return Collections.unmodifiableList(
+        new ArrayList<Token<?>>(getCredentials().getAllTokens()));
+  }
+
+  /**
+   * Obtain the tokens in credentials form associated with this user.
+   * 
+   * @return Credentials of tokens associated with this user
+   */
+  public synchronized Credentials getCredentials() {
+    final Credentials credentials = new Credentials();
+    final Set<NamedToken> namedTokens =
+        subject.getPrivateCredentials(NamedToken.class);
+    for (final NamedToken namedToken : namedTokens) {
+      credentials.addToken(namedToken.alias, namedToken.token);
     }
-    return Collections.unmodifiableList(result);
+    return credentials;
   }
 
 
   /**
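A short illustrative sketch of the alias-based token handling introduced above: re-adding a token under the same alias replaces the previous entry, and getCredentials() returns the full alias-to-token view.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class NamedTokenDemo {
  public static Credentials addAndSnapshot(UserGroupInformation ugi,
      Token<? extends TokenIdentifier> token) {
    ugi.addToken(new Text("my-service"), token); // hypothetical alias; replaces any prior entry
    return ugi.getCredentials();                 // snapshot of all named tokens
  }
}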

+ 11 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java

@@ -113,6 +113,16 @@ extends AbstractDelegationTokenIdentifier>
     }
   }
   
+  /**
+   * Reset all data structures and mutable state.
+   */
+  public synchronized void reset() {
+    currentId = 0;
+    allKeys.clear();
+    delegationTokenSequenceNumber = 0;
+    currentTokens.clear();
+  }
+  
   /** 
    * Add a previously used master key to cache (when NN restarts), 
    * should be called before activate().
@@ -190,7 +200,6 @@ extends AbstractDelegationTokenIdentifier>
   
   
   @Override
   protected synchronized byte[] createPassword(TokenIdent identifier) {
-    LOG.info("Creating password for identifier: "+identifier);
     int sequenceNum;
     long now = Time.now();
     sequenceNum = ++delegationTokenSequenceNumber;
@@ -198,6 +207,7 @@ extends AbstractDelegationTokenIdentifier>
     identifier.setMaxDate(now + tokenMaxLifetime);
     identifier.setMasterKeyId(currentId);
     identifier.setSequenceNumber(sequenceNum);
+    LOG.info("Creating password for identifier: " + identifier);
     byte[] password = createPassword(identifier.getBytes(), currentKey.getKey());
     currentTokens.put(identifier, new DelegationTokenInformation(now
         + tokenRenewInterval, password));
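The reset() hook added above is mainly useful for reusing a secret manager instance across test cases. A minimal hedged sketch:

import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;

public class SecretManagerResetDemo {
  public static void resetBetweenTests(AbstractDelegationTokenSecretManager<?> mgr) {
    mgr.reset(); // clears current id, sequence number, master keys and cached tokens
  }
}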

+ 60 - 52
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java

@@ -43,31 +43,48 @@ public class DataChecksum implements Checksum {
   public static final int CHECKSUM_NULL    = 0;
   public static final int CHECKSUM_CRC32   = 1;
   public static final int CHECKSUM_CRC32C  = 2;
-  
-  private static String[] NAMES = new String[] {
-    "NULL", "CRC32", "CRC32C"
-  };
-  
-  private static final int CHECKSUM_NULL_SIZE  = 0;
-  private static final int CHECKSUM_CRC32_SIZE = 4;
-  private static final int CHECKSUM_CRC32C_SIZE = 4;
-  
-  
-  public static DataChecksum newDataChecksum( int type, int bytesPerChecksum ) {
+  public static final int CHECKSUM_DEFAULT = 3; 
+  public static final int CHECKSUM_MIXED   = 4;
+ 
+  /** The checksum types */
+  public static enum Type {
+    NULL  (CHECKSUM_NULL, 0),
+    CRC32 (CHECKSUM_CRC32, 4),
+    CRC32C(CHECKSUM_CRC32C, 4),
+    DEFAULT(CHECKSUM_DEFAULT, 0), // This cannot be used to create DataChecksum
+    MIXED (CHECKSUM_MIXED, 0); // This cannot be used to create DataChecksum
+
+    public final int id;
+    public final int size;
+    
+    private Type(int id, int size) {
+      this.id = id;
+      this.size = size;
+    }
+
+    /** @return the type corresponding to the id. */
+    public static Type valueOf(int id) {
+      if (id < 0 || id >= values().length) {
+        throw new IllegalArgumentException("id=" + id
+            + " out of range [0, " + values().length + ")");
+      }
+      return values()[id];
+    }
+  }
+
+
+  public static DataChecksum newDataChecksum(Type type, int bytesPerChecksum ) {
     if ( bytesPerChecksum <= 0 ) {
       return null;
     }
     
     switch ( type ) {
-    case CHECKSUM_NULL :
-      return new DataChecksum( CHECKSUM_NULL, new ChecksumNull(), 
-                               CHECKSUM_NULL_SIZE, bytesPerChecksum );
-    case CHECKSUM_CRC32 :
-      return new DataChecksum( CHECKSUM_CRC32, new PureJavaCrc32(), 
-                               CHECKSUM_CRC32_SIZE, bytesPerChecksum );
-    case CHECKSUM_CRC32C:
-      return new DataChecksum( CHECKSUM_CRC32C, new PureJavaCrc32C(),
-                               CHECKSUM_CRC32C_SIZE, bytesPerChecksum);
+    case NULL :
+      return new DataChecksum(type, new ChecksumNull(), bytesPerChecksum );
+    case CRC32 :
+      return new DataChecksum(type, new PureJavaCrc32(), bytesPerChecksum );
+    case CRC32C:
+      return new DataChecksum(type, new PureJavaCrc32C(), bytesPerChecksum);
     default:
       return null;  
     }
@@ -87,7 +104,7 @@ public class DataChecksum implements Checksum {
                            ( (bytes[offset+2] & 0xff) << 16 ) |
                            ( (bytes[offset+3] & 0xff) << 8 )  |
                            ( (bytes[offset+4] & 0xff) );
-    return newDataChecksum( bytes[0], bytesPerChecksum );
+    return newDataChecksum( Type.valueOf(bytes[0]), bytesPerChecksum );
   }
   
   /**
@@ -98,7 +115,7 @@ public class DataChecksum implements Checksum {
                                  throws IOException {
     int type = in.readByte();
     int bpc = in.readInt();
-    DataChecksum summer = newDataChecksum( type, bpc );
+    DataChecksum summer = newDataChecksum(Type.valueOf(type), bpc );
     if ( summer == null ) {
       throw new IOException( "Could not create DataChecksum of type " +
                              type + " with bytesPerChecksum " + bpc );
@@ -111,13 +128,13 @@ public class DataChecksum implements Checksum {
    */
   public void writeHeader( DataOutputStream out ) 
                            throws IOException { 
-    out.writeByte( type );
+    out.writeByte( type.id );
     out.writeInt( bytesPerChecksum );
   }
 
   public byte[] getHeader() {
     byte[] header = new byte[DataChecksum.HEADER_LEN];
-    header[0] = (byte) (type & 0xff);
+    header[0] = (byte) (type.id & 0xff);
     // Writing in buffer just like DataOutput.WriteInt()
     header[1+0] = (byte) ((bytesPerChecksum >>> 24) & 0xff);
     header[1+1] = (byte) ((bytesPerChecksum >>> 16) & 0xff);
@@ -133,11 +150,11 @@ public class DataChecksum implements Checksum {
    */
    public int writeValue( DataOutputStream out, boolean reset )
                           throws IOException {
-     if ( size <= 0 ) {
+     if ( type.size <= 0 ) {
        return 0;
      }
 
-     if ( size == 4 ) {
+     if ( type.size == 4 ) {
        out.writeInt( (int) summer.getValue() );
      } else {
        throw new IOException( "Unknown Checksum " + type );
@@ -147,7 +164,7 @@ public class DataChecksum implements Checksum {
        reset();
      }
      
-     return size;
+     return type.size;
    }
    
    /**
@@ -157,11 +174,11 @@ public class DataChecksum implements Checksum {
     */
     public int writeValue( byte[] buf, int offset, boolean reset )
                            throws IOException {
-      if ( size <= 0 ) {
+      if ( type.size <= 0 ) {
         return 0;
       }
 
-      if ( size == 4 ) {
+      if ( type.size == 4 ) {
         int checksum = (int) summer.getValue();
         buf[offset+0] = (byte) ((checksum >>> 24) & 0xff);
         buf[offset+1] = (byte) ((checksum >>> 16) & 0xff);
@@ -175,7 +192,7 @@ public class DataChecksum implements Checksum {
         reset();
       }
       
-      return size;
+      return type.size;
     }
    
    /**
@@ -183,36 +200,33 @@ public class DataChecksum implements Checksum {
     * @return true if the checksum matches and false otherwise.
     */
    public boolean compare( byte buf[], int offset ) {
-     if ( size == 4 ) {
+     if ( type.size == 4 ) {
       int checksum = ( (buf[offset+0] & 0xff) << 24 ) | 
                      ( (buf[offset+1] & 0xff) << 16 ) |
                      ( (buf[offset+2] & 0xff) << 8 )  |
                      ( (buf[offset+3] & 0xff) );
       return checksum == (int) summer.getValue();
     }
-     return size == 0;
+     return type.size == 0;
    }
    
-  private final int type;
-  private final int size;
+  private final Type type;
   private final Checksum summer;
   private final int bytesPerChecksum;
   private int inSum = 0;
   
-  private DataChecksum( int checksumType, Checksum checksum,
-                        int sumSize, int chunkSize ) {
-    type = checksumType;
+  private DataChecksum( Type type, Checksum checksum, int chunkSize ) {
+    this.type = type;
     summer = checksum;
-    size = sumSize;
     bytesPerChecksum = chunkSize;
   }
   
   // Accessors
-  public int getChecksumType() {
+  public Type getChecksumType() {
     return type;
   }
   public int getChecksumSize() {
-    return size;
+    return type.size;
   }
   public int getBytesPerChecksum() {
     return bytesPerChecksum;
@@ -260,7 +274,7 @@ public class DataChecksum implements Checksum {
   public void verifyChunkedSums(ByteBuffer data, ByteBuffer checksums,
       String fileName, long basePos)
   throws ChecksumException {
-    if (size == 0) return;
+    if (type.size == 0) return;
     
     
     if (data.hasArray() && checksums.hasArray()) {
       verifyChunkedSums(
@@ -270,7 +284,7 @@ public class DataChecksum implements Checksum {
       return;
     }
     if (NativeCrc32.isAvailable()) {
-      NativeCrc32.verifyChunkedSums(bytesPerChecksum, type, checksums, data,
+      NativeCrc32.verifyChunkedSums(bytesPerChecksum, type.id, checksums, data,
           fileName, basePos);
       return;
     }
@@ -280,7 +294,7 @@ public class DataChecksum implements Checksum {
     checksums.mark();
     try {
       byte[] buf = new byte[bytesPerChecksum];
-      byte[] sum = new byte[size];
+      byte[] sum = new byte[type.size];
       while (data.remaining() > 0) {
         int n = Math.min(data.remaining(), bytesPerChecksum);
         checksums.get(sum);
@@ -351,7 +365,7 @@ public class DataChecksum implements Checksum {
    *                  buffer to put the checksums.
    */
   public void calculateChunkedSums(ByteBuffer data, ByteBuffer checksums) {
-    if (size == 0) return;
+    if (type.size == 0) return;
     
     
     if (data.hasArray() && checksums.hasArray()) {
       calculateChunkedSums(data.array(), data.arrayOffset() + data.position(), data.remaining(),
@@ -411,18 +425,12 @@ public class DataChecksum implements Checksum {
   
   
   @Override
   public int hashCode() {
-    return (this.type + 31) * this.bytesPerChecksum;
+    return (this.type.id + 31) * this.bytesPerChecksum;
   }
   
   @Override
   public String toString() {
-    String strType;
-    if (type < NAMES.length && type > 0) {
-      strType = NAMES[type];
-    } else {
-      strType = String.valueOf(type);
-    }
-    return "DataChecksum(type=" + strType +
+    return "DataChecksum(type=" + type +
       ", chunkSize=" + bytesPerChecksum + ")";
   }
   
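With the size carried on the enum constant, creating and describing a checksum is self-contained. An illustrative sketch:

import org.apache.hadoop.util.DataChecksum;

public class ChecksumTypeDemo {
  public static void main(String[] args) {
    DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
    System.out.println(sum);                           // DataChecksum(type=CRC32C, chunkSize=512)
    System.out.println(sum.getChecksumSize());         // 4, taken from Type.CRC32C.size
    System.out.println(DataChecksum.Type.valueOf(2));  // CRC32C, resolved from the on-disk id
  }
}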

+ 69 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java

@@ -204,11 +204,13 @@ public class LineReader {
       int startPosn = bufferPosn; //starting from where we left off the last time
       if (bufferPosn >= bufferLength) {
         startPosn = bufferPosn = 0;
-        if (prevCharCR)
+        if (prevCharCR) {
           ++bytesConsumed; //account for CR from previous read
+        }
         bufferLength = in.read(buffer);
-        if (bufferLength <= 0)
+        if (bufferLength <= 0) {
           break; // EOF
+        }
       }
       for (; bufferPosn < bufferLength; ++bufferPosn) { //search for newline
         if (buffer[bufferPosn] == LF) {
@@ -223,8 +225,9 @@ public class LineReader {
         prevCharCR = (buffer[bufferPosn] == CR);
       }
       int readLength = bufferPosn - startPosn;
-      if (prevCharCR && newlineLength == 0)
+      if (prevCharCR && newlineLength == 0) {
         --readLength; //CR at the end of the buffer
+      }
       bytesConsumed += readLength;
       int appendLength = readLength - newlineLength;
       if (appendLength > maxLineLength - txtLength) {
@@ -236,8 +239,9 @@ public class LineReader {
       }
     } while (newlineLength == 0 && bytesConsumed < maxBytesToConsume);
 
-    if (bytesConsumed > (long)Integer.MAX_VALUE)
-      throw new IOException("Too many bytes before newline: " + bytesConsumed);    
+    if (bytesConsumed > (long)Integer.MAX_VALUE) {
+      throw new IOException("Too many bytes before newline: " + bytesConsumed);
+    }
     return (int)bytesConsumed;
   }
 
@@ -246,18 +250,56 @@ public class LineReader {
    */
   private int readCustomLine(Text str, int maxLineLength, int maxBytesToConsume)
       throws IOException {
+   /* We're reading data from inputStream, but the head of the stream may be
+    *  already captured in the previous buffer, so we have several cases:
+    * 
+    * 1. The buffer tail does not contain any character sequence which
+    *    matches with the head of delimiter. We count it as a 
+    *    ambiguous byte count = 0
+    *    
+    * 2. The buffer tail contains a X number of characters,
+    *    that forms a sequence, which matches with the
+    *    head of delimiter. We count ambiguous byte count = X
+    *    
+    *    // ***  eg: A segment of input file is as follows
+    *    
+    *    " record 1792: I found this bug very interesting and
+    *     I have completely read about it. record 1793: This bug
+    *     can be solved easily record 1794: This ." 
+    *    
+    *    delimiter = "record";
+    *        
+    *    supposing:- String at the end of buffer =
+    *    "I found this bug very interesting and I have completely re"
+    *    There for next buffer = "ad about it. record 179       ...."           
+    *     
+    *     The matching characters in the input
+    *     buffer tail and delimiter head = "re" 
+    *     Therefore, ambiguous byte count = 2 ****   //
+    *     
+    *     2.1 If the following bytes are the remaining characters of
+    *         the delimiter, then we have to capture only up to the starting 
+    *         position of delimiter. That means, we need not include the 
+    *         ambiguous characters in str.
+    *     
+    *     2.2 If the following bytes are not the remaining characters of
+    *         the delimiter ( as mentioned in the example ), 
+    *         then we have to include the ambiguous characters in str. 
+    */
     str.clear();
     int txtLength = 0; // tracks str.getLength(), as an optimization
     long bytesConsumed = 0;
     int delPosn = 0;
+    int ambiguousByteCount=0; // To capture the ambiguous characters count
     do {
-      int startPosn = bufferPosn; // starting from where we left off the last
-      // time
+      int startPosn = bufferPosn; // Start from previous end position
       if (bufferPosn >= bufferLength) {
         startPosn = bufferPosn = 0;
         bufferLength = in.read(buffer);
-        if (bufferLength <= 0)
+        if (bufferLength <= 0) {
+          str.append(recordDelimiterBytes, 0, ambiguousByteCount);
           break; // EOF
           break; // EOF
+        }
       }
       }
       for (; bufferPosn < bufferLength; ++bufferPosn) {
       for (; bufferPosn < bufferLength; ++bufferPosn) {
         if (buffer[bufferPosn] == recordDelimiterBytes[delPosn]) {
         if (buffer[bufferPosn] == recordDelimiterBytes[delPosn]) {
@@ -266,7 +308,8 @@ public class LineReader {
             bufferPosn++;
             bufferPosn++;
             break;
             break;
           }
           }
-        } else {
+        } else if (delPosn != 0) {
+          bufferPosn--;
           delPosn = 0;
           delPosn = 0;
         }
         }
       }
       }
@@ -277,14 +320,27 @@ public class LineReader {
         appendLength = maxLineLength - txtLength;
         appendLength = maxLineLength - txtLength;
       }
       }
       if (appendLength > 0) {
       if (appendLength > 0) {
+        if (ambiguousByteCount > 0) {
+          str.append(recordDelimiterBytes, 0, ambiguousByteCount);
+          //appending the ambiguous characters (refer case 2.2)
+          bytesConsumed += ambiguousByteCount;
+          ambiguousByteCount=0;
+        }
         str.append(buffer, startPosn, appendLength);
         str.append(buffer, startPosn, appendLength);
         txtLength += appendLength;
         txtLength += appendLength;
       }
       }
-    } while (delPosn < recordDelimiterBytes.length
+      if (bufferPosn >= bufferLength) {
+        if (delPosn > 0 && delPosn < recordDelimiterBytes.length) {
+          ambiguousByteCount = delPosn;
+          bytesConsumed -= ambiguousByteCount; //to be consumed in next
+        }
+      }
+    } while (delPosn < recordDelimiterBytes.length 
         && bytesConsumed < maxBytesToConsume);
         && bytesConsumed < maxBytesToConsume);
-    if (bytesConsumed > (long) Integer.MAX_VALUE)
+    if (bytesConsumed > (long) Integer.MAX_VALUE) {
       throw new IOException("Too many bytes before delimiter: " + bytesConsumed);
       throw new IOException("Too many bytes before delimiter: " + bytesConsumed);
-    return (int) bytesConsumed;
+    }
+    return (int) bytesConsumed; 
   }
   }
 
 
   /**
   /**
@@ -296,7 +352,7 @@ public class LineReader {
    */
    */
   public int readLine(Text str, int maxLineLength) throws IOException {
   public int readLine(Text str, int maxLineLength) throws IOException {
     return readLine(str, maxLineLength, Integer.MAX_VALUE);
     return readLine(str, maxLineLength, Integer.MAX_VALUE);
-}
+  }
 
 
   /**
   /**
    * Read from the InputStream into the given Text.
    * Read from the InputStream into the given Text.
@@ -307,5 +363,4 @@ public class LineReader {
   public int readLine(Text str) throws IOException {
   public int readLine(Text str) throws IOException {
     return readLine(str, Integer.MAX_VALUE, Integer.MAX_VALUE);
     return readLine(str, Integer.MAX_VALUE, Integer.MAX_VALUE);
   }
   }
-
 }
 }

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java

@@ -47,7 +47,7 @@ public class NativeCodeLoader {
     }
     }
     try {
     try {
       System.loadLibrary("hadoop");
       System.loadLibrary("hadoop");
-      LOG.info("Loaded the native-hadoop library");
+      LOG.debug("Loaded the native-hadoop library");
       nativeCodeLoaded = true;
       nativeCodeLoaded = true;
     } catch (Throwable t) {
     } catch (Throwable t) {
       // Ignore failure to load
       // Ignore failure to load

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c

@@ -24,7 +24,7 @@
 // Simple Functions
 // Simple Functions
 //****************************
 //****************************
 
 
-extern int LZ4_compress   (char* source, char* dest, int isize);
+extern int LZ4_compress   (const char* source, char* dest, int isize);
 
 
 /*
 /*
 LZ4_compress() :
 LZ4_compress() :

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c

@@ -20,7 +20,7 @@
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_compress_lz4_Lz4Decompressor.h"
 #include "org_apache_hadoop_io_compress_lz4_Lz4Decompressor.h"
 
 
-int LZ4_uncompress_unknownOutputSize (char* source, char* dest, int isize, int maxOutputSize);
+int LZ4_uncompress_unknownOutputSize(const char* source, char* dest, int isize, int maxOutputSize);
 
 
 /*
 /*
 LZ4_uncompress_unknownOutputSize() :
 LZ4_uncompress_unknownOutputSize() :

+ 16 - 6
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c

@@ -25,6 +25,8 @@
 #include "org_apache_hadoop_io_compress_snappy.h"
 #include "org_apache_hadoop_io_compress_snappy.h"
 #include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
 #include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
 
 
+#define JINT_MAX 0x7fffffff
+
 static jfieldID SnappyCompressor_clazz;
 static jfieldID SnappyCompressor_clazz;
 static jfieldID SnappyCompressor_uncompressedDirectBuf;
 static jfieldID SnappyCompressor_uncompressedDirectBuf;
 static jfieldID SnappyCompressor_uncompressedDirectBufLen;
 static jfieldID SnappyCompressor_uncompressedDirectBufLen;
@@ -39,7 +41,7 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
   // Load libsnappy.so
   // Load libsnappy.so
   void *libsnappy = dlopen(HADOOP_SNAPPY_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
   void *libsnappy = dlopen(HADOOP_SNAPPY_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
   if (!libsnappy) {
   if (!libsnappy) {
-    char* msg = (char*)malloc(1000);
+    char msg[1000];
     snprintf(msg, 1000, "%s (%s)!", "Cannot load " HADOOP_SNAPPY_LIBRARY, dlerror());
     snprintf(msg, 1000, "%s (%s)!", "Cannot load " HADOOP_SNAPPY_LIBRARY, dlerror());
     THROW(env, "java/lang/UnsatisfiedLinkError", msg);
     THROW(env, "java/lang/UnsatisfiedLinkError", msg);
     return;
     return;
@@ -71,6 +73,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
   jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen);
   jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen);
   jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_compressedDirectBuf);
   jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_compressedDirectBuf);
   jint compressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_directBufferSize);
   jint compressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_directBufferSize);
+  size_t buf_len;
 
 
   // Get the input direct buffer
   // Get the input direct buffer
   LOCK_CLASS(env, clazz, "SnappyCompressor");
   LOCK_CLASS(env, clazz, "SnappyCompressor");
@@ -78,7 +81,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
 
   if (uncompressed_bytes == 0) {
   if (uncompressed_bytes == 0) {
-    return (jint)0;
+    return 0;
   }
   }
 
 
   // Get the output direct buffer
   // Get the output direct buffer
@@ -87,15 +90,22 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
 
   if (compressed_bytes == 0) {
   if (compressed_bytes == 0) {
-    return (jint)0;
+    return 0;
   }
   }
 
 
-  snappy_status ret = dlsym_snappy_compress(uncompressed_bytes, uncompressed_direct_buf_len, compressed_bytes, &compressed_direct_buf_len);
+  /* size_t should always be 4 bytes or larger. */
+  buf_len = (size_t)compressed_direct_buf_len;
+  snappy_status ret = dlsym_snappy_compress(uncompressed_bytes,
+        uncompressed_direct_buf_len, compressed_bytes, &buf_len);
   if (ret != SNAPPY_OK){
   if (ret != SNAPPY_OK){
     THROW(env, "Ljava/lang/InternalError", "Could not compress data. Buffer length is too small.");
     THROW(env, "Ljava/lang/InternalError", "Could not compress data. Buffer length is too small.");
+    return 0;
+  }
+  if (buf_len > JINT_MAX) {
+    THROW(env, "Ljava/lang/InternalError", "Invalid return buffer length.");
+    return 0;
   }
   }
 
 
   (*env)->SetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen, 0);
   (*env)->SetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen, 0);
-
-  return (jint)compressed_direct_buf_len;
+  return (jint)buf_len;
 }
 }

+ 7 - 13
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c

@@ -16,6 +16,8 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
+#define _GNU_SOURCE
+
 #include <assert.h>
 #include <assert.h>
 #include <errno.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <fcntl.h>
@@ -366,23 +368,15 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_chmod(
  */
  */
 static void throw_ioe(JNIEnv* env, int errnum)
 static void throw_ioe(JNIEnv* env, int errnum)
 {
 {
-  const char* message;
-  char buffer[80];
+  char message[80];
   jstring jstr_message;
   jstring jstr_message;
 
 
-  buffer[0] = 0;
-#ifdef STRERROR_R_CHAR_P
-  // GNU strerror_r
-  message = strerror_r(errnum, buffer, sizeof(buffer));
-  assert (message != NULL);
-#else
-  int ret = strerror_r(errnum, buffer, sizeof(buffer));
-  if (ret == 0) {
-    message = buffer;
+  if ((errnum >= 0) && (errnum < sys_nerr)) {
+    snprintf(message, sizeof(message), "%s", sys_errlist[errnum]);
   } else {
   } else {
-    message = "Unknown error";
+    snprintf(message, sizeof(message), "Unknown error %d", errnum);
   }
   }
-#endif
+
   jobject errno_obj = errno_to_enum(env, errnum);
   jobject errno_obj = errno_to_enum(env, errnum);
 
 
   if ((jstr_message = (*env)->NewStringUTF(env, message)) == NULL)
   if ((jstr_message = (*env)->NewStringUTF(env, message)) == NULL)

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c

@@ -40,8 +40,8 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsMapping_getGroupForUser
 (JNIEnv *env, jobject jobj, jstring juser) {
 (JNIEnv *env, jobject jobj, jstring juser) {
   extern int getGroupIDList(const char *user, int *ngroups, gid_t **groups);
   extern int getGroupIDList(const char *user, int *ngroups, gid_t **groups);
   extern int getGroupDetails(gid_t group, char **grpBuf);
   extern int getGroupDetails(gid_t group, char **grpBuf);
-
-  jobjectArray jgroups; 
+  const char *cuser = NULL;
+  jobjectArray jgroups = NULL;
   int error = -1;
   int error = -1;
 
 
   if (emptyGroups == NULL) {
   if (emptyGroups == NULL) {
@@ -56,7 +56,7 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsMapping_getGroupForUser
     }
     }
   }
   }
   char *grpBuf = NULL;
   char *grpBuf = NULL;
-  const char *cuser = (*env)->GetStringUTFChars(env, juser, NULL);
+  cuser = (*env)->GetStringUTFChars(env, juser, NULL);
   if (cuser == NULL) {
   if (cuser == NULL) {
     goto cleanup;
     goto cleanup;
   }
   }

+ 2 - 3
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c

@@ -45,6 +45,8 @@ typedef struct listElement UserList;
 JNIEXPORT jobjectArray JNICALL 
 JNIEXPORT jobjectArray JNICALL 
 Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNetgroupJNI
 Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNetgroupJNI
 (JNIEnv *env, jobject jobj, jstring jgroup) {
 (JNIEnv *env, jobject jobj, jstring jgroup) {
+  UserList *userListHead = NULL;
+  int       userListSize = 0;
 
 
   // pointers to free at the end
   // pointers to free at the end
   const char *cgroup  = NULL;
   const char *cgroup  = NULL;
@@ -65,9 +67,6 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   // get users
   // get users
   // see man pages for setnetgrent, getnetgrent and endnetgrent
   // see man pages for setnetgrent, getnetgrent and endnetgrent
 
 
-  UserList *userListHead = NULL;
-  int       userListSize = 0;
-
   // set the name of the group for subsequent calls to getnetgrent
   // set the name of the group for subsequent calls to getnetgrent
   // note that we want to end group lokup regardless whether setnetgrent
   // note that we want to end group lokup regardless whether setnetgrent
   // was successful or not (as long as it was called we need to call
   // was successful or not (as long as it was called we need to call

+ 2 - 1
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c

@@ -18,6 +18,7 @@
 
 
 #include <arpa/inet.h>
 #include <arpa/inet.h>
 #include <assert.h>
 #include <assert.h>
+#include <inttypes.h>
 #include <stdlib.h>
 #include <stdlib.h>
 #include <stdint.h>
 #include <stdint.h>
 #include <string.h>
 #include <string.h>
@@ -50,7 +51,7 @@ static void throw_checksum_exception(JNIEnv *env,
 
 
   // Format error message
   // Format error message
   snprintf(message, sizeof(message),
   snprintf(message, sizeof(message),
-    "Checksum error: %s at %ld exp: %d got: %d",
+    "Checksum error: %s at %"PRId64" exp: %"PRId32" got: %"PRId32,
     filename, pos, expected_crc, got_crc);
     filename, pos, expected_crc, got_crc);
   if ((jstr_message = (*env)->NewStringUTF(env, message)) == NULL) {
   if ((jstr_message = (*env)->NewStringUTF(env, message)) == NULL) {
     goto cleanup;
     goto cleanup;

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c

@@ -41,7 +41,7 @@ static uint32_t crc32c_sb8(uint32_t crc, const uint8_t *buf, size_t length);

 #ifdef USE_PIPELINED
 static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, const uint8_t *p_buf, size_t block_size, int num_blocks);
-#endif USE_PIPELINED
+#endif
 static int cached_cpu_supports_crc32; // initialized by constructor below
 static uint32_t crc32c_hardware(uint32_t crc, const uint8_t* data, size_t length);


+ 17 - 3
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -351,8 +351,12 @@
   <name>fs.trash.interval</name>
   <value>0</value>
   <description>Number of minutes after which the checkpoint
-  gets deleted.
-  If zero, the trash feature is disabled.
+  gets deleted.  If zero, the trash feature is disabled.
+  This option may be configured both on the server and the
+  client. If trash is disabled server side then the client
+  side configuration is checked. If trash is enabled on the
+  server side then the value configured on the server is
+  used and the client configuration value is ignored.
   </description>
 </property>

@@ -360,7 +364,8 @@
   <name>fs.trash.checkpoint.interval</name>
   <value>0</value>
   <description>Number of minutes between trash checkpoints.
-  Should be smaller or equal to fs.trash.interval.
+  Should be smaller or equal to fs.trash.interval. If zero,
+  the value is set to the value of fs.trash.interval.
   Every time the checkpointer runs it creates a new checkpoint 
   out of current and removes checkpoints created more than 
   fs.trash.interval minutes ago.
@@ -1083,4 +1088,13 @@
   </description>
 </property>

+<property>
+  <name>hadoop.jetty.logs.serve.aliases</name>
+  <value>true</value>
+  <description>
+    Enable/Disable aliases serving from jetty
+  </description>
+</property>
+
+
 </configuration>
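
The trash behaviour described in the two property descriptions above can also be driven from client code. A minimal sketch (illustrative only: it uses a default Configuration and the literal property names rather than the FS_TRASH_INTERVAL_KEY constants):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Trash;

public class TrashConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // fs.trash.interval is in minutes; 0 disables the trash feature
    conf.setLong("fs.trash.interval", 10);
    // left at 0, the checkpoint interval falls back to fs.trash.interval
    conf.setLong("fs.trash.checkpoint.interval", 0);
    System.out.println("trash enabled: " + new Trash(conf).isEnabled());
  }
}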

+ 7 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java

@@ -39,6 +39,7 @@ import java.util.regex.Pattern;

 import junit.framework.TestCase;
 import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertNotNull;

 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
@@ -1157,6 +1158,12 @@ public class TestConfiguration extends TestCase {
         configuration.getPattern("testPattern", Pattern.compile("")).pattern());
   }
   
+  public void testGetClassByNameOrNull() throws Exception {
+   Configuration config = new Configuration();
+   Class<?> clazz = config.getClassByNameOrNull("java.lang.Object");
+   assertNotNull(clazz);
+  }
+  
   public static void main(String[] argv) throws Exception {
     junit.textui.TestRunner.main(new String[]{
       TestConfiguration.class.getName()

+ 11 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java

@@ -364,15 +364,17 @@ public abstract class FSMainOperationsBaseTest  {
   }
   
   @Test
-  public void testGlobStatusThrowsExceptionForNonExistentFile() throws Exception {
-    try {
-      // This should throw a FileNotFoundException
-      fSys.globStatus(
-          getTestRootPath(fSys, "test/hadoopfsdf/?"));
-      Assert.fail("Should throw FileNotFoundException");
-    } catch (FileNotFoundException fnfe) {
-      // expected
-    }
+  public void testGlobStatusNonExistentFile() throws Exception {
+    FileStatus[] paths = fSys.globStatus(
+        getTestRootPath(fSys, "test/hadoopfsdf"));
+    Assert.assertNull(paths);
+
+    paths = fSys.globStatus(
+        getTestRootPath(fSys, "test/hadoopfsdf/?"));
+    Assert.assertEquals(0, paths.length);
+    paths = fSys.globStatus(
+        getTestRootPath(fSys, "test/hadoopfsdf/xyz*/?"));
+    Assert.assertEquals(0, paths.length);
   }
   
   @Test

+ 11 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java

@@ -360,15 +360,17 @@ public abstract class FileContextMainOperationsBaseTest  {
   }
   
   @Test
-  public void testGlobStatusThrowsExceptionForNonExistentFile() throws Exception {
-    try {
-      // This should throw a FileNotFoundException
-      fc.util().globStatus(
-          getTestRootPath(fc, "test/hadoopfsdf/?"));
-      Assert.fail("Should throw FileNotFoundException");
-    } catch (FileNotFoundException fnfe) {
-      // expected
-    }
+  public void testGlobStatusNonExistentFile() throws Exception {
+    FileStatus[] paths = fc.util().globStatus(
+          getTestRootPath(fc, "test/hadoopfsdf"));
+    Assert.assertNull(paths);
+
+    paths = fc.util().globStatus(
+        getTestRootPath(fc, "test/hadoopfsdf/?"));
+    Assert.assertEquals(0, paths.length);
+    paths = fc.util().globStatus(
+        getTestRootPath(fc, "test/hadoopfsdf/xyz*/?"));
+    Assert.assertEquals(0, paths.length);
   }
   
   @Test

+ 37 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java

@@ -24,8 +24,10 @@ import java.util.Random;


 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
 import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;

 /**
  * Helper class for unit tests.
@@ -218,4 +220,39 @@ public final class FileSystemTestHelper {
     }
     Assert.assertEquals(aFs.makeQualified(new Path(path)), s.getPath());
   }
+  
+  /**
+   * Class to enable easier mocking of a FileSystem
+   * Use getRawFileSystem to retrieve the mock
+   */
+  public static class MockFileSystem extends FilterFileSystem {
+    public MockFileSystem() {
+      // it's a bit ackward to mock ourselves, but it allows the visibility
+      // of methods to be increased
+      super(mock(MockFileSystem.class));
+    }
+    @Override
+    public MockFileSystem getRawFileSystem() {
+      return (MockFileSystem) super.getRawFileSystem();
+      
+    }
+    // these basic methods need to directly propagate to the mock to be
+    // more transparent
+    @Override
+    public void initialize(URI uri, Configuration conf) throws IOException {
+      fs.initialize(uri, conf);
+    }
+    @Override
+    public String getCanonicalServiceName() {
+      return fs.getCanonicalServiceName();
+    }
+    @Override
+    public FileSystem[] getChildFileSystems() {
+      return fs.getChildFileSystems();
+    }
+    @Override // publicly expose for mocking
+    public Token<?> getDelegationToken(String renewer) throws IOException {
+      return fs.getDelegationToken(renewer);
+    }    
+  }
 }

+ 2 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java

@@ -24,6 +24,7 @@ import java.net.URISyntaxException;
 import java.util.EnumSet;

 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
 import org.junit.Test;
@@ -76,7 +77,7 @@ public class TestAfsCheckPath {
     @Override
     public FSDataOutputStream createInternal(Path f, EnumSet<CreateFlag> flag,
         FsPermission absolutePermission, int bufferSize, short replication,
-        long blockSize, Progressable progress, int bytesPerChecksum,
+        long blockSize, Progressable progress, ChecksumOpt checksumOpt,
         boolean createParent) throws IOException {
       // deliberately empty
       return null;

+ 7 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java

@@ -18,18 +18,20 @@

 package org.apache.hadoop.fs;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
 import java.io.IOException;
 import java.net.URI;

-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.NetUtilsTestResolver;
 import org.apache.hadoop.util.Progressable;
+import org.junit.BeforeClass;
 import org.junit.Test;

-public class TestFileSystemCanonicalization extends TestCase {
+public class TestFileSystemCanonicalization {
   static String[] authorities = {
     "myfs://host",
     "myfs://host.a",
@@ -41,8 +43,8 @@ public class TestFileSystemCanonicalization extends TestCase {
   };


-  @Test
-  public void testSetupResolver() throws Exception {
+  @BeforeClass
+  public static void initialize() throws Exception {
     NetUtilsTestResolver.install();
   }


+ 279 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemTokens.java

@@ -0,0 +1,279 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import static org.junit.Assert.*;
+import static org.mockito.Matchers.*;
+import static org.mockito.Mockito.*;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystemTestHelper.MockFileSystem;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+public class TestFileSystemTokens {
+  private static String renewer = "renewer!";
+
+  @Test
+  public void testFsWithNoToken() throws Exception {
+    MockFileSystem fs = createFileSystemForServiceName(null);  
+    Credentials credentials = new Credentials();
+    
+    fs.addDelegationTokens(renewer, credentials);
+    verifyTokenFetch(fs, false);
+    assertEquals(0, credentials.numberOfTokens());
+  }
+  
+  @Test
+  public void testFsWithToken() throws Exception {
+    Text service = new Text("singleTokenFs");
+    MockFileSystem fs = createFileSystemForServiceName(service);
+    Credentials credentials = new Credentials();
+    
+    fs.addDelegationTokens(renewer, credentials);
+    verifyTokenFetch(fs, true);
+    
+    assertEquals(1, credentials.numberOfTokens());
+    assertNotNull(credentials.getToken(service));
+  }
+
+  @Test
+  public void testFsWithTokenExists() throws Exception {
+    Credentials credentials = new Credentials();
+    Text service = new Text("singleTokenFs");
+    MockFileSystem fs = createFileSystemForServiceName(service);
+    Token<?> token = mock(Token.class);
+    credentials.addToken(service, token);
+    
+    fs.addDelegationTokens(renewer, credentials);
+    verifyTokenFetch(fs, false);
+    
+    assertEquals(1, credentials.numberOfTokens());
+    assertSame(token, credentials.getToken(service));
+  }
+  
+  @Test
+  public void testFsWithChildTokens() throws Exception {
+    Credentials credentials = new Credentials();
+    Text service1 = new Text("singleTokenFs1");
+    Text service2 = new Text("singleTokenFs2");
+
+    MockFileSystem fs1 = createFileSystemForServiceName(service1);
+    MockFileSystem fs2 = createFileSystemForServiceName(service2);
+    MockFileSystem fs3 = createFileSystemForServiceName(null);
+    MockFileSystem multiFs = 
+        createFileSystemForServiceName(null, fs1, fs2, fs3);
+    
+    multiFs.addDelegationTokens(renewer, credentials);
+    verifyTokenFetch(multiFs, false); // has no tokens of own, only child tokens
+    verifyTokenFetch(fs1, true);
+    verifyTokenFetch(fs2, true);
+    verifyTokenFetch(fs3, false);
+    
+    assertEquals(2, credentials.numberOfTokens());
+    assertNotNull(credentials.getToken(service1));
+    assertNotNull(credentials.getToken(service2));
+  }
+
+  @Test
+  public void testFsWithDuplicateChildren() throws Exception {
+    Credentials credentials = new Credentials();
+    Text service = new Text("singleTokenFs1");
+
+    MockFileSystem fs = createFileSystemForServiceName(service);
+    MockFileSystem multiFs =
+        createFileSystemForServiceName(null, fs, new FilterFileSystem(fs));
+    
+    multiFs.addDelegationTokens(renewer, credentials);
+    verifyTokenFetch(multiFs, false);
+    verifyTokenFetch(fs, true);
+    
+    assertEquals(1, credentials.numberOfTokens());
+    assertNotNull(credentials.getToken(service));
+  }
+
+  @Test
+  public void testFsWithDuplicateChildrenTokenExists() throws Exception {
+    Credentials credentials = new Credentials();
+    Text service = new Text("singleTokenFs1");
+    Token<?> token = mock(Token.class);
+    credentials.addToken(service, token);
+
+    MockFileSystem fs = createFileSystemForServiceName(service);
+    MockFileSystem multiFs =
+        createFileSystemForServiceName(null, fs, new FilterFileSystem(fs));
+    
+    multiFs.addDelegationTokens(renewer, credentials);
+    verifyTokenFetch(multiFs, false);
+    verifyTokenFetch(fs, false);
+    
+    assertEquals(1, credentials.numberOfTokens());
+    assertSame(token, credentials.getToken(service));
+  }
+
+  @Test
+  public void testFsWithChildTokensOneExists() throws Exception {
+    Credentials credentials = new Credentials();
+    Text service1 = new Text("singleTokenFs1");
+    Text service2 = new Text("singleTokenFs2");
+    Token<?> token = mock(Token.class);
+    credentials.addToken(service2, token);
+
+    MockFileSystem fs1 = createFileSystemForServiceName(service1);
+    MockFileSystem fs2 = createFileSystemForServiceName(service2);
+    MockFileSystem fs3 = createFileSystemForServiceName(null);
+    MockFileSystem multiFs = createFileSystemForServiceName(null, fs1, fs2, fs3);
+    
+    multiFs.addDelegationTokens(renewer, credentials);
+    verifyTokenFetch(multiFs, false);
+    verifyTokenFetch(fs1, true);
+    verifyTokenFetch(fs2, false); // we had added its token to credentials
+    verifyTokenFetch(fs3, false);
+    
+    assertEquals(2, credentials.numberOfTokens());
+    assertNotNull(credentials.getToken(service1));
+    assertSame(token, credentials.getToken(service2));
+  }
+
+  @Test
+  public void testFsWithMyOwnAndChildTokens() throws Exception {
+    Credentials credentials = new Credentials();
+    Text service1 = new Text("singleTokenFs1");
+    Text service2 = new Text("singleTokenFs2");
+    Text myService = new Text("multiTokenFs");
+    Token<?> token = mock(Token.class);
+    credentials.addToken(service2, token);
+
+    MockFileSystem fs1 = createFileSystemForServiceName(service1);
+    MockFileSystem fs2 = createFileSystemForServiceName(service2);
+    MockFileSystem multiFs = createFileSystemForServiceName(myService, fs1, fs2);
+    
+    multiFs.addDelegationTokens(renewer, credentials);
+    verifyTokenFetch(multiFs, true); // its own token and also of its children
+    verifyTokenFetch(fs1, true);
+    verifyTokenFetch(fs2, false);  // we had added its token to credentials 
+    
+    assertEquals(3, credentials.numberOfTokens());
+    assertNotNull(credentials.getToken(myService));
+    assertNotNull(credentials.getToken(service1));
+    assertNotNull(credentials.getToken(service2));
+  }
+
+
+  @Test
+  public void testFsWithMyOwnExistsAndChildTokens() throws Exception {
+    Credentials credentials = new Credentials();
+    Text service1 = new Text("singleTokenFs1");
+    Text service2 = new Text("singleTokenFs2");
+    Text myService = new Text("multiTokenFs");
+    Token<?> token = mock(Token.class);
+    credentials.addToken(myService, token);
+
+    MockFileSystem fs1 = createFileSystemForServiceName(service1);
+    MockFileSystem fs2 = createFileSystemForServiceName(service2);
+    MockFileSystem multiFs = createFileSystemForServiceName(myService, fs1, fs2);
+    
+    multiFs.addDelegationTokens(renewer, credentials);
+    verifyTokenFetch(multiFs, false);  // we had added its token to credentials
+    verifyTokenFetch(fs1, true);
+    verifyTokenFetch(fs2, true);
+    
+    assertEquals(3, credentials.numberOfTokens());
+    assertSame(token, credentials.getToken(myService));
+    assertNotNull(credentials.getToken(service1));
+    assertNotNull(credentials.getToken(service2));
+  }
+
+  @Test
+  public void testFsWithNestedDuplicatesChildren() throws Exception {
+    Credentials credentials = new Credentials();
+    Text service1 = new Text("singleTokenFs1");
+    Text service2 = new Text("singleTokenFs2");
+    Text service4 = new Text("singleTokenFs4");
+    Text multiService = new Text("multiTokenFs");
+    Token<?> token2 = mock(Token.class);
+    credentials.addToken(service2, token2);
+    
+    MockFileSystem fs1 = createFileSystemForServiceName(service1);
+    MockFileSystem fs1B = createFileSystemForServiceName(service1);
+    MockFileSystem fs2 = createFileSystemForServiceName(service2);
+    MockFileSystem fs3 = createFileSystemForServiceName(null);
+    MockFileSystem fs4 = createFileSystemForServiceName(service4);
+    // now let's get dirty!  ensure dup tokens aren't fetched even when
+    // repeated and dupped in a nested fs.  fs4 is a real test of the drill
+    // down: multi-filter-multi-filter-filter-fs4.
+    MockFileSystem multiFs = createFileSystemForServiceName(multiService,
+        fs1, fs1B, fs2, fs2, new FilterFileSystem(fs3),
+        new FilterFileSystem(new FilterFileSystem(fs4)));
+    MockFileSystem superMultiFs = createFileSystemForServiceName(null,
+        fs1, fs1B, fs1, new FilterFileSystem(fs3), new FilterFileSystem(multiFs));
+    superMultiFs.addDelegationTokens(renewer, credentials);
+    verifyTokenFetch(superMultiFs, false); // does not have its own token
+    verifyTokenFetch(multiFs, true); // has its own token
+    verifyTokenFetch(fs1, true);
+    verifyTokenFetch(fs2, false); // we had added its token to credentials
+    verifyTokenFetch(fs3, false); // has no tokens
+    verifyTokenFetch(fs4, true);
+    
+    assertEquals(4, credentials.numberOfTokens()); //fs1+fs2+fs4+multifs (fs3=0)
+    assertNotNull(credentials.getToken(service1));
+    assertNotNull(credentials.getToken(service2));
+    assertSame(token2, credentials.getToken(service2));
+    assertNotNull(credentials.getToken(multiService));
+    assertNotNull(credentials.getToken(service4));
+  }
+
+  public static MockFileSystem createFileSystemForServiceName(
+      final Text service, final FileSystem... children) throws IOException {
+    final MockFileSystem fs = new MockFileSystem();
+    final MockFileSystem mockFs = fs.getRawFileSystem();
+    if (service != null) {
+      when(mockFs.getCanonicalServiceName()).thenReturn(service.toString());
+      when(mockFs.getDelegationToken(any(String.class))).thenAnswer(
+        new Answer<Token<?>>() {
+          @Override
+          public Token<?> answer(InvocationOnMock invocation) throws Throwable {
+            Token<?> token = new Token<TokenIdentifier>();
+            token.setService(service);
+            return token;
+          }
+        });
+    }
+    when(mockFs.getChildFileSystems()).thenReturn(children);
+    return fs;
+  }
+
+  // check that canonical name was requested, if renewer is not null that
+  // a token was requested, and that child fs was invoked
+  private void verifyTokenFetch(MockFileSystem fs, boolean expected) throws IOException {
+    verify(fs.getRawFileSystem(), atLeast(1)).getCanonicalServiceName();
+    if (expected) {
+      verify(fs.getRawFileSystem()).getDelegationToken(renewer);    
+    } else {
+      verify(fs.getRawFileSystem(), never()).getDelegationToken(any(String.class));
+    }
+    verify(fs.getRawFileSystem(), atLeast(1)).getChildFileSystems();
+  }
+}
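
The new test above exercises FileSystem#addDelegationTokens(renewer, credentials). A minimal client-side sketch of the same call, assuming a reachable default filesystem (the "yarn" renewer name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class TokenFetchSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Credentials creds = new Credentials();
    // Fetches this filesystem's delegation token plus any child filesystem
    // tokens that are not already present in creds; duplicates are skipped.
    Token<?>[] newTokens = fs.addDelegationTokens("yarn", creds);
    System.out.println("fetched " + newTokens.length + " token(s), total "
        + creds.numberOfTokens());
  }
}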

+ 21 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java

@@ -32,8 +32,10 @@ import java.util.Iterator;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 import org.junit.BeforeClass;
@@ -79,6 +81,11 @@ public class TestFilterFileSystem {
             Progressable progress) throws IOException {
       return null;
     }
+    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
+            EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
+            Progressable progress, ChecksumOpt checksumOpt) throws IOException {
+      return null;
+    }
     public boolean mkdirs(Path f) { return false; }
     public FSDataInputStream open(Path f) { return null; }
     public FSDataOutputStream create(Path f) { return null; }
@@ -137,6 +144,16 @@ public class TestFilterFileSystem {
         Progressable progress) throws IOException {
       return null;
     }
+    public FSDataOutputStream create(Path f,
+        FsPermission permission,
+        EnumSet<CreateFlag> flags,
+        int bufferSize,
+        short replication,
+        long blockSize,
+        Progressable progress,
+        ChecksumOpt checksumOpt) throws IOException {
+      return null;
+    }
     public String getName() { return null; }
     public boolean delete(Path f) { return false; }
     public short getReplication(Path src) { return 0 ; }
@@ -185,6 +202,10 @@ public class TestFilterFileSystem {
     public boolean cancelDeleteOnExit(Path f) throws IOException {
       return false;
     }
+    public Token<?>[] addDelegationTokens(String renewer, Credentials creds)
+        throws IOException {
+      return null;
+    }
     public String getScheme() {
       return "dontcheck";
     }

+ 70 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsOptions.java

@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.util.DataChecksum;
+
+import org.junit.Test;
+
+public class TestFsOptions {
+
+  @Test
+  public void testProcessChecksumOpt() {
+    ChecksumOpt defaultOpt = new ChecksumOpt(DataChecksum.Type.CRC32, 512);
+    ChecksumOpt finalOpt;
+
+    // Give a null 
+    finalOpt = ChecksumOpt.processChecksumOpt(defaultOpt, null);
+    checkParams(defaultOpt, finalOpt);
+
+    // null with bpc
+    finalOpt = ChecksumOpt.processChecksumOpt(defaultOpt, null, 1024);
+    checkParams(DataChecksum.Type.CRC32, 1024, finalOpt);
+
+    ChecksumOpt myOpt = new ChecksumOpt();
+
+    // custom with unspecified parameters
+    finalOpt = ChecksumOpt.processChecksumOpt(defaultOpt, myOpt);
+    checkParams(defaultOpt, finalOpt);
+
+    myOpt = new ChecksumOpt(DataChecksum.Type.CRC32C, 2048);
+
+    // custom config
+    finalOpt = ChecksumOpt.processChecksumOpt(defaultOpt, myOpt);
+    checkParams(DataChecksum.Type.CRC32C, 2048, finalOpt);
+
+    // custom config + bpc
+    finalOpt = ChecksumOpt.processChecksumOpt(defaultOpt, myOpt, 4096);
+    checkParams(DataChecksum.Type.CRC32C, 4096, finalOpt);
+  }
+
+  private void checkParams(ChecksumOpt expected, ChecksumOpt obtained) {
+    assertEquals(expected.getChecksumType(), obtained.getChecksumType());
+    assertEquals(expected.getBytesPerChecksum(), obtained.getBytesPerChecksum());
+  }
+
+  private void checkParams(DataChecksum.Type type, int bpc, ChecksumOpt obtained) {
+    assertEquals(type, obtained.getChecksumType());
+    assertEquals(bpc, obtained.getBytesPerChecksum());
+  }
+}

+ 0 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java

@@ -32,7 +32,6 @@ import java.util.List;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.ftpserver.command.impl.STAT;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;

+ 12 - 8
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java

@@ -343,14 +343,18 @@ public class TestLocalDirAllocator {
   @Test
   public void testRemoveContext() throws IOException {
     String dir = buildBufferDir(ROOT, 0);
-    String contextCfgItemName = "application_1340842292563_0004.app.cache.dirs";
-    conf.set(contextCfgItemName, dir);
-    LocalDirAllocator localDirAllocator = new LocalDirAllocator(
-        contextCfgItemName);
-    localDirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
-    assertTrue(LocalDirAllocator.isContextValid(contextCfgItemName));
-    LocalDirAllocator.removeContext(contextCfgItemName);
-    assertFalse(LocalDirAllocator.isContextValid(contextCfgItemName));
+    try {
+      String contextCfgItemName = "application_1340842292563_0004.app.cache.dirs";
+      conf.set(contextCfgItemName, dir);
+      LocalDirAllocator localDirAllocator = new LocalDirAllocator(
+          contextCfgItemName);
+      localDirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
+      assertTrue(LocalDirAllocator.isContextValid(contextCfgItemName));
+      LocalDirAllocator.removeContext(contextCfgItemName);
+      assertFalse(LocalDirAllocator.isContextValid(contextCfgItemName));
+    } finally {
+      rmBufferDirs();
+    }
   }

 }

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java

@@ -33,8 +33,8 @@ import org.junit.Test;
  * This class tests the local file system via the FileSystem abstraction.
  */
 public class TestLocalFileSystem {
-  private static String TEST_ROOT_DIR
-    = System.getProperty("test.build.data","build/test/data/work-dir/localfs");
+  private static final String TEST_ROOT_DIR
+    = System.getProperty("test.build.data","build/test/data") + "/work-dir/localfs";

   private Configuration conf;
   private FileSystem fileSys;

+ 4 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java

@@ -111,10 +111,10 @@ public class TestTrash extends TestCase {
       throws IOException {
     FileSystem fs = FileSystem.get(conf);

-    conf.set(FS_TRASH_INTERVAL_KEY, "0"); // disabled
+    conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
     assertFalse(new Trash(conf).isEnabled());

-    conf.set(FS_TRASH_INTERVAL_KEY, "10"); // 10 minute
+    conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
     assertTrue(new Trash(conf).isEnabled());

     FsShell shell = new FsShell();
@@ -435,7 +435,7 @@
   }

   public static void trashNonDefaultFS(Configuration conf) throws IOException {
-    conf.set(FS_TRASH_INTERVAL_KEY, "10"); // 10 minute
+    conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
     // attempt non-default FileSystem trash
     {
       final FileSystem lfs = FileSystem.getLocal(conf);
@@ -580,7 +580,7 @@
     FileSystem fs = FileSystem.getLocal(conf);
     
     conf.set("fs.defaultFS", fs.getUri().toString());
-    conf.set(FS_TRASH_INTERVAL_KEY, "10"); //minutes..
+    conf.setLong(FS_TRASH_INTERVAL_KEY, 10); //minutes..
     FsShell shell = new FsShell();
     shell.setConf(conf);
     //Path trashRoot = null;

+ 20 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCommandFactory.java

@@ -48,6 +48,10 @@ public class TestCommandFactory {
     factory.addClass(TestCommand3.class, "tc3");
     names = factory.getNames();
     assertArrayEquals(new String []{"tc1", "tc2", "tc2.1", "tc3"}, names);
+    
+    factory.addClass(TestCommand4.class, (new TestCommand4()).getName());
+    names = factory.getNames();
+    assertArrayEquals(new String[]{"tc1", "tc2", "tc2.1", "tc3", "tc4"}, names);
   }
   
   @Test
@@ -72,8 +76,17 @@ public class TestCommandFactory {
     assertNotNull(instance);
     assertEquals(TestCommand2.class, instance.getClass());    
     assertEquals("tc2.1", instance.getCommandName());
+    
+    factory.addClass(TestCommand4.class, "tc4");
+    instance = factory.getInstance("tc4");
+    assertNotNull(instance);
+    assertEquals(TestCommand4.class, instance.getClass());    
+    assertEquals("tc4", instance.getCommandName());
+    String usage = instance.getUsage();
+    assertEquals("-tc4 tc4_usage", usage);
+    assertEquals("tc4_description", instance.getDescription());
   }
-  
+
   static class TestRegistrar {
     public static void registerCommands(CommandFactory factory) {
       factory.addClass(TestCommand1.class, "tc1");
@@ -84,4 +97,10 @@ public class TestCommandFactory {
   static class TestCommand1 extends FsCommand {}
   static class TestCommand2 extends FsCommand {}
   static class TestCommand3 extends FsCommand {}
+  
+  static class TestCommand4 extends FsCommand {
+    static final String NAME = "tc4";
+    static final String USAGE = "tc4_usage";
+    static final String DESCRIPTION = "tc4_description";
+  }
 }

+ 38 - 40
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java

@@ -26,23 +26,17 @@ import java.util.Arrays;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.junit.BeforeClass;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;

 public class TestPathData {
-  protected static Configuration conf;
-  protected static FileSystem fs;
-  protected static String dirString;
-  protected static Path testDir;
-  protected static PathData item;
-  
-  protected static String[] d1Paths =
-    new String[] { "d1/f1", "d1/f1.1", "d1/f2" };
-  protected static String[] d2Paths =
-    new String[] { "d2/f3" };
-        
-  @BeforeClass
-  public static void initialize() throws Exception {
+  protected Configuration conf;
+  protected FileSystem fs;
+  protected Path testDir;
+
+  @Before
+  public void initialize() throws Exception {
     conf = new Configuration();
     fs = FileSystem.getLocal(conf);
     testDir = new Path(
@@ -60,23 +54,28 @@ public class TestPathData {
     fs.create(new Path("d2","f3"));
   }

+  @After
+  public void cleanup() throws Exception {
+    fs.close();
+  }
+
   @Test
   public void testWithDirStringAndConf() throws Exception {
-    dirString = "d1";
-    item = new PathData(dirString, conf);
-    checkPathData();
+    String dirString = "d1";
+    PathData item = new PathData(dirString, conf);
+    checkPathData(dirString, item);

     // properly implementing symlink support in various commands will require
     // trailing slashes to be retained
     dirString = "d1/";
     item = new PathData(dirString, conf);
-    checkPathData();
+    checkPathData(dirString, item);
   }

   @Test
   public void testUnqualifiedUriContents() throws Exception {
-    dirString = "d1";
-    item = new PathData(dirString, conf);
+    String dirString = "d1";
+    PathData item = new PathData(dirString, conf);
     PathData[] items = item.getDirectoryContents();
     assertEquals(
         sortedString("d1/f1", "d1/f1.1", "d1/f2"),
@@ -86,8 +85,8 @@ public class TestPathData {

   @Test
   public void testQualifiedUriContents() throws Exception {
-    dirString = fs.makeQualified(new Path("d1")).toString();
-    item = new PathData(dirString, conf);
+    String dirString = fs.makeQualified(new Path("d1")).toString();
+    PathData item = new PathData(dirString, conf);
     PathData[] items = item.getDirectoryContents();
     assertEquals(
         sortedString(dirString+"/f1", dirString+"/f1.1", dirString+"/f2"),
@@ -97,8 +96,8 @@ public class TestPathData {

   @Test
   public void testCwdContents() throws Exception {
-    dirString = Path.CUR_DIR;
-    item = new PathData(dirString, conf);
+    String dirString = Path.CUR_DIR;
+    PathData item = new PathData(dirString, conf);
     PathData[] items = item.getDirectoryContents();
     assertEquals(
         sortedString("d1", "d2"),
@@ -106,17 +105,16 @@ public class TestPathData {
     );
   }

-
-	@Test
-	public void testToFile() throws Exception {
-    item = new PathData(".", conf);
+  @Test
+  public void testToFile() throws Exception {
+    PathData item = new PathData(".", conf);
     assertEquals(new File(testDir.toString()), item.toFile());
-	  item = new PathData("d1/f1", conf);
-	  assertEquals(new File(testDir+"/d1/f1"), item.toFile());
-    item = new PathData(testDir+"/d1/f1", conf);
-    assertEquals(new File(testDir+"/d1/f1"), item.toFile());
-	}
-	
+    item = new PathData("d1/f1", conf);
+    assertEquals(new File(testDir + "/d1/f1"), item.toFile());
+    item = new PathData(testDir + "/d1/f1", conf);
+    assertEquals(new File(testDir + "/d1/f1"), item.toFile());
+  }
+
   @Test
   public void testAbsoluteGlob() throws Exception {
     PathData[] items = PathData.expandAsGlob(testDir+"/d1/f1*", conf);
@@ -147,18 +145,18 @@

   @Test
   public void testWithStringAndConfForBuggyPath() throws Exception {
-    dirString = "file:///tmp";
-    testDir = new Path(dirString);
-    item = new PathData(dirString, conf);
+    String dirString = "file:///tmp";
+    Path tmpDir = new Path(dirString);
+    PathData item = new PathData(dirString, conf);
     // this may fail some day if Path is fixed to not crunch the uri
     // if the authority is null, however we need to test that the PathData
     // toString() returns the given string, while Path toString() does
     // the crunching
-    assertEquals("file:/tmp", testDir.toString());
-    checkPathData();
+    assertEquals("file:/tmp", tmpDir.toString());
+    checkPathData(dirString, item);
   }

-  public void checkPathData() throws Exception {
+  public void checkPathData(String dirString, PathData item) throws Exception {
     assertEquals("checking fs", fs, item.fs);
     assertEquals("checking string", dirString, item.toString());
     assertEquals("checking path",

+ 120 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegationTokenSupport.java

@@ -22,10 +22,18 @@ import static org.junit.Assert.*;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
-
+import java.util.Arrays;
+import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.Test;

 /**
 public class TestViewFileSystemDelegationTokenSupport {
 public class TestViewFileSystemDelegationTokenSupport {
   
   
   private static final String MOUNT_TABLE_NAME = "vfs-cluster";
   private static final String MOUNT_TABLE_NAME = "vfs-cluster";
+  static Configuration conf;
+  static FileSystem viewFs;
+  static FakeFileSystem fs1;
+  static FakeFileSystem fs2;
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    conf = ViewFileSystemTestSetup.createConfig();
+    fs1 = setupFileSystem(new URI("fs1:///"), FakeFileSystem.class);
+    fs2 = setupFileSystem(new URI("fs2:///"), FakeFileSystem.class);
+    viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+  }
+
+  static FakeFileSystem setupFileSystem(URI uri, Class<? extends FileSystem> clazz)
+      throws Exception {
+    String scheme = uri.getScheme();
+    conf.set("fs."+scheme+".impl", clazz.getName());
+    FakeFileSystem fs = (FakeFileSystem)FileSystem.get(uri, conf);
+    // mount each fs twice, will later ensure 1 token/fs
+    ConfigUtil.addLink(conf, "/mounts/"+scheme+"-one", fs.getUri());
+    ConfigUtil.addLink(conf, "/mounts/"+scheme+"-two", fs.getUri());
+    return fs;
+  }
 
 
   /**
   /**
    * Regression test for HADOOP-8408.
    * Regression test for HADOOP-8408.
@@ -69,4 +100,92 @@ public class TestViewFileSystemDelegationTokenSupport {
     assertNull(serviceName);
     assertNull(serviceName);
   }
   }
 
 
+  @Test
+  public void testGetChildFileSystems() throws Exception {
+    assertNull(fs1.getChildFileSystems());
+    assertNull(fs2.getChildFileSystems());    
+    List<FileSystem> children = Arrays.asList(viewFs.getChildFileSystems());
+    assertEquals(2, children.size());
+    assertTrue(children.contains(fs1));
+    assertTrue(children.contains(fs2));
+  }
+  
+  @Test
+  public void testAddDelegationTokens() throws Exception {
+    Credentials creds = new Credentials();
+    Token<?> fs1Tokens[] = addTokensWithCreds(fs1, creds);
+    assertEquals(1, fs1Tokens.length);
+    assertEquals(1, creds.numberOfTokens());
+    Token<?> fs2Tokens[] = addTokensWithCreds(fs2, creds);
+    assertEquals(1, fs2Tokens.length);
+    assertEquals(2, creds.numberOfTokens());
+    
+    Credentials savedCreds = creds;
+    creds = new Credentials();
+    
+    // should get the same set of tokens as explicitly fetched above
+    Token<?> viewFsTokens[] = viewFs.addDelegationTokens("me", creds);
+    assertEquals(2, viewFsTokens.length);
+    assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
+    assertEquals(savedCreds.numberOfTokens(), creds.numberOfTokens()); 
+    // should get none, already have all tokens
+    viewFsTokens = viewFs.addDelegationTokens("me", creds);
+    assertEquals(0, viewFsTokens.length);
+    assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
+    assertEquals(savedCreds.numberOfTokens(), creds.numberOfTokens());
+  }
+
+  Token<?>[] addTokensWithCreds(FileSystem fs, Credentials creds) throws Exception {
+    Credentials savedCreds;
+    
+    savedCreds = new Credentials(creds);
+    Token<?> tokens[] = fs.addDelegationTokens("me", creds);
+    // test that we got the token we wanted, and that creds were modified
+    assertEquals(1, tokens.length);
+    assertEquals(fs.getCanonicalServiceName(), tokens[0].getService().toString());
+    assertTrue(creds.getAllTokens().contains(tokens[0]));
+    assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
+    assertEquals(savedCreds.numberOfTokens()+1, creds.numberOfTokens());
+    
+    // shouldn't get any new tokens since already in creds
+    savedCreds = new Credentials(creds);
+    Token<?> tokenRefetch[] = fs.addDelegationTokens("me", creds);
+    assertEquals(0, tokenRefetch.length);
+    assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
+    assertEquals(savedCreds.numberOfTokens(), creds.numberOfTokens()); 
+
+    return tokens;
+  }
+
+  static class FakeFileSystem extends RawLocalFileSystem {
+    URI uri;
+
+    public void initialize(URI name, Configuration conf) throws IOException {
+      this.uri = name;
+    }
+
+    @Override
+    public Path getInitialWorkingDirectory() {
+      return new Path("/"); // ctor calls getUri before the uri is inited...
+    }
+    
+    public URI getUri() {
+      return uri;
+    }
+
+    @Override
+    public String getCanonicalServiceName() {
+      return String.valueOf(this.getUri()+"/"+this.hashCode());
+    }
+
+    @Override
+    public Token<?> getDelegationToken(String renewer) throws IOException {
+      Token<?> token = new Token<TokenIdentifier>();
+      token.setService(new Text(getCanonicalServiceName()));
+      return token;
+    }
+
+    @Override
+    public void close() {}
+  }
 }

+ 9 - 17
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.fs.viewfs;

 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.List;


@@ -137,9 +138,9 @@ public class ViewFileSystemBaseTest {
    */
   @Test
   public void testGetDelegationTokens() throws IOException {
-    List<Token<?>> delTokens = 
-        fsView.getDelegationTokens("sanjay");
-    Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.size()); 
+    Token<?>[] delTokens = 
+        fsView.addDelegationTokens("sanjay", new Credentials());
+    Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.length); 
   }
   
   int getExpectedDelegationTokenCount() {
@@ -150,29 +151,20 @@
   public void testGetDelegationTokensWithCredentials() throws IOException {
     Credentials credentials = new Credentials();
     List<Token<?>> delTokens =
-        fsView.getDelegationTokens("sanjay", credentials);
+        Arrays.asList(fsView.addDelegationTokens("sanjay", credentials));

     int expectedTokenCount = getExpectedDelegationTokenCountWithCredentials();

     Assert.assertEquals(expectedTokenCount, delTokens.size());
+    Credentials newCredentials = new Credentials();
     for (int i = 0; i < expectedTokenCount / 2; i++) {
       Token<?> token = delTokens.get(i);
-      credentials.addToken(token.getService(), token);
+      newCredentials.addToken(token.getService(), token);
     }

     List<Token<?>> delTokens2 =
-        fsView.getDelegationTokens("sanjay", credentials);
-    Assert.assertEquals(expectedTokenCount, delTokens2.size());
-
-    for (int i = 0; i < delTokens2.size(); i++) {
-      for (int j = 0; j < delTokens.size(); j++) {
-        if (delTokens.get(j) == delTokens2.get(i)) {
-          delTokens.remove(j);
-          break;
-        }
-      }
-    }
-    Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens.size());
+        Arrays.asList(fsView.addDelegationTokens("sanjay", newCredentials));
+    Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens2.size());
   }

   int getExpectedDelegationTokenCountWithCredentials() {

+ 39 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java

@@ -21,6 +21,8 @@ package org.apache.hadoop.io;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;

+import java.io.ByteArrayInputStream;
+import java.io.EOFException;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
@@ -175,4 +177,41 @@ public class TestIOUtils {
           "Error while reading compressed data", ioe);
     }
   }
+  
+  @Test
+  public void testSkipFully() throws IOException {
+    byte inArray[] = new byte[] {0, 1, 2, 3, 4};
+    ByteArrayInputStream in = new ByteArrayInputStream(inArray);
+    try {
+      in.mark(inArray.length);
+      IOUtils.skipFully(in, 2);
+      IOUtils.skipFully(in, 2);
+      try {
+        IOUtils.skipFully(in, 2);
+        fail("expected to get a PrematureEOFException");
+      } catch (EOFException e) {
+        assertEquals(e.getMessage(), "Premature EOF from inputStream " +
+            "after skipping 1 byte(s).");
+      }
+      in.reset();
+      try {
+        IOUtils.skipFully(in, 20);
+        fail("expected to get a PrematureEOFException");
+      } catch (EOFException e) {
+        assertEquals(e.getMessage(), "Premature EOF from inputStream " +
+            "after skipping 5 byte(s).");
+      }
+      in.reset();
+      IOUtils.skipFully(in, 5);
+      try {
+        IOUtils.skipFully(in, 10);
+        fail("expected to get a PrematureEOFException");
+      } catch (EOFException e) {
+        assertEquals(e.getMessage(), "Premature EOF from inputStream " +
+            "after skipping 0 byte(s).");
+      }
+    } finally {
+      in.close();
+    }
+  }
 }
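
The skipFully test above pins down the EOF semantics of IOUtils.skipFully. A small usage sketch (illustrative; the buffer contents and skip lengths are arbitrary):

import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.InputStream;
import org.apache.hadoop.io.IOUtils;

public class SkipFullySketch {
  public static void main(String[] args) throws Exception {
    InputStream in = new ByteArrayInputStream(new byte[] {0, 1, 2, 3, 4});
    IOUtils.skipFully(in, 4);   // ok: 4 of the 5 bytes are skipped
    try {
      IOUtils.skipFully(in, 2); // only 1 byte left, so this must fail
    } catch (EOFException e) {
      System.out.println("premature EOF: " + e.getMessage());
    }
  }
}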

+ 1 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableName.java

@@ -83,6 +83,7 @@ public class TestWritableName extends TestCase {
     Configuration conf = new Configuration();
     String altName = testName + ".alt";

+    WritableName.setName(SimpleWritable.class, testName);
     WritableName.addName(SimpleWritable.class, altName);

     Class<?> test = WritableName.getClass(altName, conf);

+ 13 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.ipc;

 import static org.junit.Assert.*;

+import java.io.IOException;
 import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
@@ -115,4 +116,16 @@ public class TestServer {
       socket.close();
     }
   }
+  
+  @Test
+  public void testExceptionsHandler() throws IOException {
+    Server.ExceptionsHandler handler = new Server.ExceptionsHandler();
+    handler.addTerseExceptions(IOException.class);
+    handler.addTerseExceptions(RpcServerException.class, IpcException.class);
+
+    assertTrue(handler.isTerse(IOException.class));
+    assertTrue(handler.isTerse(RpcServerException.class));
+    assertTrue(handler.isTerse(IpcException.class));
+    assertFalse(handler.isTerse(RpcClientException.class));
+  }
 }

+ 18 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCredentials.java

@@ -213,5 +213,22 @@ public class TestCredentials {
     // new token & secret should be added
     assertEquals(token[2], creds.getToken(service[2]));
     assertEquals(secret[2], new Text(creds.getSecretKey(secret[2])));
- }
+  }
+  
+  @Test
+  public void testAddTokensToUGI() {
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("someone");
+    Credentials creds = new Credentials();
+    
+    for (int i=0; i < service.length; i++) {
+      creds.addToken(service[i], token[i]);
+    }
+    creds.addTokensToUGI(ugi);
+
+    creds = ugi.getCredentials();
+    for (int i=0; i < service.length; i++) {
+      assertSame(token[i], creds.getToken(service[i]));
+    }
+    assertEquals(service.length, creds.numberOfTokens());
+  }
 }

+ 105 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java

@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.junit.Test;
+
+public class TestGroupFallback {
+  public static final Log LOG = LogFactory.getLog(TestGroupFallback.class);
+
+  @Test
+  public void testGroupShell() throws Exception {
+    Logger.getRootLogger().setLevel(Level.DEBUG);
+    Configuration conf = new Configuration();
+    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
+        "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");
+
+    Groups groups = new Groups(conf);
+
+    String username = System.getProperty("user.name");
+    List<String> groupList = groups.getGroups(username);
+
+    LOG.info(username + " has GROUPS: " + groupList.toString());
+    assertTrue(groupList.size() > 0);
+  }
+
+  @Test
+  public void testNetgroupShell() throws Exception {
+    Logger.getRootLogger().setLevel(Level.DEBUG);
+    Configuration conf = new Configuration();
+    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
+        "org.apache.hadoop.security.ShellBasedUnixGroupsNetgroupMapping");
+
+    Groups groups = new Groups(conf);
+
+    String username = System.getProperty("user.name");
+    List<String> groupList = groups.getGroups(username);
+
+    LOG.info(username + " has GROUPS: " + groupList.toString());
+    assertTrue(groupList.size() > 0);
+  }
+
+  @Test
+  public void testGroupWithFallback() throws Exception {
+    LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
+        "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
+        " test the fall back functionality");
+    Logger.getRootLogger().setLevel(Level.DEBUG);
+    Configuration conf = new Configuration();
+    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
+        "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback");
+
+    Groups groups = new Groups(conf);
+
+    String username = System.getProperty("user.name");
+    List<String> groupList = groups.getGroups(username);
+
+    LOG.info(username + " has GROUPS: " + groupList.toString());
+    assertTrue(groupList.size() > 0);
+  }
+
+  @Test
+  public void testNetgroupWithFallback() throws Exception {
+    LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
+        "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
+        " test the fall back functionality");
+    Logger.getRootLogger().setLevel(Level.DEBUG);
+    Configuration conf = new Configuration();
+    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
+        "org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMappingWithFallback");
+
+    Groups groups = new Groups(conf);
+
+    String username = System.getProperty("user.name");
+    List<String> groupList = groups.getGroups(username);
+
+    LOG.info(username + " has GROUPS: " + groupList.toString());
+    assertTrue(groupList.size() > 0);
+  }
+
+}
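
Note: the four tests above differ only in the value of hadoop.security.group.mapping. A minimal sketch of selecting the fallback mapping outside of a test, assuming the same Configuration/Groups API used above (in a deployment the key would normally be set in core-site.xml rather than in code):

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.security.Groups;

    public class GroupMappingSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The *WithFallback providers use the JNI-based mapping when the native
        // library is loaded and fall back to the shell-based mapping otherwise,
        // which is what the "-Pnative" hint in the test log messages refers to.
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
            "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback");

        Groups groups = new Groups(conf);
        List<String> result = groups.getGroups(System.getProperty("user.name"));
        System.out.println(result);
      }
    }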

+ 2 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java

@@ -137,6 +137,7 @@ public class TestSecurityUtil {
 
   @Test
   public void testBuildDTServiceName() {
+    SecurityUtil.setTokenServiceUseIp(true);
     assertEquals("127.0.0.1:123",
         SecurityUtil.buildDTServiceName(URI.create("test://LocalHost"), 123)
     );
@@ -153,6 +154,7 @@ public class TestSecurityUtil {
  
  @Test
  public void testBuildTokenServiceSockAddr() {
+    SecurityUtil.setTokenServiceUseIp(true);
    assertEquals("127.0.0.1:123",
        SecurityUtil.buildTokenService(new InetSocketAddress("LocalHost", 123)).toString()
    );

+ 79 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java

@@ -19,8 +19,7 @@ package org.apache.hadoop.security;
 import static org.junit.Assert.*;
 import org.junit.*;
 
-import org.mockito.Mockito;
-import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.*;
 
 import java.io.BufferedReader;
 import java.io.IOException;
@@ -35,6 +34,7 @@ import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.LoginContext;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
@@ -194,8 +194,6 @@ public class TestUserGroupInformation {
   public void testEqualsWithRealUser() throws Exception {
     UserGroupInformation realUgi1 = UserGroupInformation.createUserForTesting(
         "RealUser", GROUP_NAMES);
-    UserGroupInformation realUgi2 = UserGroupInformation.createUserForTesting(
-        "RealUser", GROUP_NAMES);
     UserGroupInformation proxyUgi1 = UserGroupInformation.createProxyUser(
         USER_NAME, realUgi1);
     UserGroupInformation proxyUgi2 =
@@ -213,7 +211,82 @@ public class TestUserGroupInformation {
     assertArrayEquals(new String[]{GROUP1_NAME, GROUP2_NAME, GROUP3_NAME},
                       uugi.getGroupNames());
   }
+
+  @SuppressWarnings("unchecked") // from Mockito mocks
+  @Test
+  public <T extends TokenIdentifier> void testAddToken() throws Exception {
+    UserGroupInformation ugi = 
+        UserGroupInformation.createRemoteUser("someone"); 
+    
+    Token<T> t1 = mock(Token.class);
+    Token<T> t2 = mock(Token.class);
+    Token<T> t3 = mock(Token.class);
+    
+    // add token to ugi
+    ugi.addToken(t1);
+    checkTokens(ugi, t1);
+
+    // replace token t1 with t2 - with same key (null)
+    ugi.addToken(t2);
+    checkTokens(ugi, t2);
+    
+    // change t1 service and add token
+    when(t1.getService()).thenReturn(new Text("t1"));
+    ugi.addToken(t1);
+    checkTokens(ugi, t1, t2);
+  
+    // overwrite t1 token with t3 - same key (!null)
+    when(t3.getService()).thenReturn(new Text("t1"));
+    ugi.addToken(t3);
+    checkTokens(ugi, t2, t3);
+
+    // just try to re-add with new name
+    when(t1.getService()).thenReturn(new Text("t1.1"));
+    ugi.addToken(t1);
+    checkTokens(ugi, t1, t2, t3);    
+
+    // just try to re-add with new name again
+    ugi.addToken(t1);
+    checkTokens(ugi, t1, t2, t3);    
+  }
   
+  private void checkTokens(UserGroupInformation ugi, Token<?> ... tokens) {
+    // check the ugi's token collection
+    Collection<Token<?>> ugiTokens = ugi.getTokens();
+    for (Token<?> t : tokens) {
+      assertTrue(ugiTokens.contains(t));
+    }
+    assertEquals(tokens.length, ugiTokens.size());
+
+    // check the ugi's credentials
+    Credentials ugiCreds = ugi.getCredentials();
+    for (Token<?> t : tokens) {
+      assertSame(t, ugiCreds.getToken(t.getService()));
+    }
+    assertEquals(tokens.length, ugiCreds.numberOfTokens());
+  }
+
+  @SuppressWarnings("unchecked") // from Mockito mocks
+  @Test
+  public <T extends TokenIdentifier> void testAddNamedToken() throws Exception {
+    UserGroupInformation ugi = 
+        UserGroupInformation.createRemoteUser("someone"); 
+    
+    Token<T> t1 = mock(Token.class);
+    Text service1 = new Text("t1");
+    Text service2 = new Text("t2");
+    when(t1.getService()).thenReturn(service1);
+    
+    // add token
+    ugi.addToken(service1, t1);
+    assertSame(t1, ugi.getCredentials().getToken(service1));
+
+    // add token with another name
+    ugi.addToken(service2, t1);
+    assertSame(t1, ugi.getCredentials().getToken(service1));
+    assertSame(t1, ugi.getCredentials().getToken(service2));
+  }
+
   @SuppressWarnings("unchecked") // from Mockito mocks
   @Test
   public <T extends TokenIdentifier> void testUGITokens() throws Exception {
@@ -221,7 +294,9 @@ public class TestUserGroupInformation {
       UserGroupInformation.createUserForTesting("TheDoctor", 
                                                 new String [] { "TheTARDIS"});
     Token<T> t1 = mock(Token.class);
+    when(t1.getService()).thenReturn(new Text("t1"));
     Token<T> t2 = mock(Token.class);
+    when(t2.getService()).thenReturn(new Text("t2"));
     
     ugi.addToken(t1);
     ugi.addToken(t2);

+ 10 - 11
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java

@@ -35,13 +35,13 @@ public class TestDataChecksum {
   private static final int DATA_TRAILER_IN_BUFFER = 3;
   
   private static final int BYTES_PER_CHUNK = 512;
-  private static final int CHECKSUM_TYPES[] = new int[] {
-    DataChecksum.CHECKSUM_CRC32, DataChecksum.CHECKSUM_CRC32C
+  private static final DataChecksum.Type CHECKSUM_TYPES[] = {
+    DataChecksum.Type.CRC32, DataChecksum.Type.CRC32C
   };
   
   @Test
   public void testBulkOps() throws Exception {
-    for (int type : CHECKSUM_TYPES) {
+    for (DataChecksum.Type type : CHECKSUM_TYPES) {
       System.err.println(
           "---- beginning tests with checksum type " + type  + "----");
       DataChecksum checksum = DataChecksum.newDataChecksum(
@@ -118,21 +118,20 @@ public class TestDataChecksum {
   @Test
   public void testEquality() {
     assertEquals(
-        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512),
-        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512));
+        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512),
+        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
     assertFalse(
-        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512).equals(
-        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 1024)));
+        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512).equals(
+        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 1024)));
     assertFalse(
-        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512).equals(
-        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32C, 512)));        
+        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512).equals(
+        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512)));        
   }
   
   @Test
   public void testToString() {
     assertEquals("DataChecksum(type=CRC32, chunkSize=512)",
-        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512)
-          .toString());
+        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512).toString());
   }
 
   private static void corruptBufferOffset(ByteBuffer buf, int offset) {

+ 141 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLineReader.java

@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.ByteArrayInputStream;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.util.LineReader;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+public class TestLineReader {
+  private LineReader lineReader;
+  private String TestData;
+  private String Delimiter;
+  private Text line;
+
+  @Test
+  public void testCustomDelimiter() throws Exception {
+    /* TEST_1
+     * The test scenario is the tail of the buffer
+     * equals the starting character/s of delimiter
+     * 
+     * The Test Data is such that,
+     *   
+     * 1) we will have "</entity>" as delimiter  
+     *  
+     * 2) The tail of the current buffer would be "</"
+     *    which matches with the starting character sequence of delimiter.
+     *    
+     * 3) The Head of the next buffer would be   "id>" 
+     *    which does NOT match with the remaining characters of delimiter.
+     *   
+     * 4) Input data would be prefixed by char 'a' 
+     *    about numberOfCharToFillTheBuffer times.
+     *    So that, one iteration to buffer the input data,
+     *    would end at '</' ie equals starting 2 char of delimiter  
+     *     
+     * 5) For this we would take BufferSize as 64 * 1024;
+     * 
+     * Check Condition
+     *  In the second key value pair, the value should contain 
+     *  "</"  from currentToken and
+     *  "id>" from next token 
+     */  
+    
+    Delimiter="</entity>"; 
+    
+    String CurrentBufferTailToken=
+        "</entity><entity><id>Gelesh</";
+    // Ending part of Input Data Buffer
+    // It contains '</' ie delimiter character 
+    
+    String NextBufferHeadToken=
+        "id><name>Omathil</name></entity>";
+    // Supposing the start of next buffer is this
+    
+    String Expected = 
+        (CurrentBufferTailToken+NextBufferHeadToken)
+        .replace(Delimiter, "");                       
+    // Expected ,must capture from both the buffer, excluding Delimiter
+  
+    String TestPartOfInput = CurrentBufferTailToken+NextBufferHeadToken;
+  
+    int BufferSize=64 * 1024;
+    int numberOfCharToFillTheBuffer=BufferSize-CurrentBufferTailToken.length();
+    StringBuilder fillerString=new StringBuilder();
+    for (int i=0;i<numberOfCharToFillTheBuffer;i++) {  
+      fillerString.append('a'); // char 'a' as a filler for the test string
+    }
+
+    TestData = fillerString + TestPartOfInput;
+    lineReader = new LineReader(
+        new ByteArrayInputStream(TestData.getBytes()),Delimiter.getBytes());
+    
+    line = new Text();
+    
+    lineReader.readLine(line); 
+    Assert.assertEquals(fillerString.toString(),line.toString());
+    
+    lineReader.readLine(line);
+    Assert.assertEquals(Expected, line.toString());
+    
+    /*TEST_2
+     * The test scenario is such that,
+     * the character/s preceding the delimiter,
+     * equals the starting character/s of delimiter
+     */
+    
+    Delimiter = "record";
+    StringBuilder TestStringBuilder = new StringBuilder();
+    
+    TestStringBuilder.append(Delimiter+"Kerala ");
+    TestStringBuilder.append(Delimiter+"Bangalore");
+    TestStringBuilder.append(Delimiter+" North Korea");
+    TestStringBuilder.append(Delimiter+Delimiter+
+                        "Guantanamo");
+    TestStringBuilder.append(Delimiter+"ecord"+"recor"+"core"); //~EOF with 're'
+    
+    TestData=TestStringBuilder.toString();
+    
+    lineReader = new LineReader(
+        new ByteArrayInputStream(TestData.getBytes()),Delimiter.getBytes());
+    
+    lineReader.readLine(line); 
+    Assert.assertEquals("",line.toString());
+    lineReader.readLine(line); 
+    Assert.assertEquals("Kerala ",line.toString());
+    
+    lineReader.readLine(line); 
+    Assert.assertEquals("Bangalore",line.toString());
+    
+    lineReader.readLine(line); 
+    Assert.assertEquals(" North Korea",line.toString());
+    
+    lineReader.readLine(line); 
+    Assert.assertEquals("",line.toString());
+    lineReader.readLine(line); 
+    Assert.assertEquals("Guantanamo",line.toString());
+    
+    lineReader.readLine(line); 
+    Assert.assertEquals(("ecord"+"recor"+"core"),line.toString());
+  }
+}
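
Note: stripped of the buffer-boundary setup above, reading records with a custom delimiter comes down to the following minimal sketch (the delimiter and input are made-up examples):

    import java.io.ByteArrayInputStream;

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.util.LineReader;

    public class LineReaderSketch {
      public static void main(String[] args) throws Exception {
        String input = "<entity><id>1</id></entity><entity><id>2</id></entity>";

        // With a custom delimiter, LineReader splits on that byte sequence
        // instead of '\n'/'\r\n'; the delimiter is not part of the returned text.
        LineReader reader = new LineReader(
            new ByteArrayInputStream(input.getBytes()), "</entity>".getBytes());

        Text record = new Text();
        while (reader.readLine(record) > 0) {
          System.out.println(record);  // "<entity><id>1</id>", then "<entity><id>2</id>"
        }
        reader.close();
      }
    }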

+ 15 - 15
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -60,8 +60,13 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-annotations</artifactId>
-      <scope>provided</scope>
+      <artifactId>hadoop-auth</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-core</artifactId>
+      <scope>compile</scope>
     </dependency>
     <dependency>
       <groupId>com.sun.jersey</groupId>
@@ -74,18 +79,8 @@
       <scope>provided</scope>
     </dependency>
     <dependency>
-      <groupId>javax.servlet.jsp</groupId>
-      <artifactId>jsp-api</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>commons-codec</groupId>
-      <artifactId>commons-codec</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.jdom</groupId>
-      <artifactId>jdom</artifactId>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
       <scope>compile</scope>
     </dependency>
     <dependency>
@@ -93,6 +88,11 @@
       <artifactId>json-simple</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
@@ -248,7 +248,7 @@
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-log4j12</artifactId>
-      <scope>compile</scope>
+      <scope>runtime</scope>
     </dependency>
   </dependencies>
 

+ 0 - 14
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java

@@ -863,7 +863,6 @@ public class HttpFSFileSystem extends FileSystem
 
 
   @Override
-  @SuppressWarnings("deprecation")
   public Token<?> getDelegationToken(final String renewer)
     throws IOException {
     return doAsRealUserIfNecessary(new Callable<Token<?>>() {
@@ -875,19 +874,6 @@ public class HttpFSFileSystem extends FileSystem
     });
   }
 
-
-  @Override
-  public List<Token<?>> getDelegationTokens(final String renewer)
-    throws IOException {
-    return doAsRealUserIfNecessary(new Callable<List<Token<?>>>() {
-      @Override
-      public List<Token<?>> call() throws Exception {
-        return HttpFSKerberosAuthenticator.
-          getDelegationTokens(uri, httpFSAddr, authToken, renewer);
-      }
-    });
-  }
-
   public long renewDelegationToken(final Token<?> token) throws IOException {
     return doAsRealUserIfNecessary(new Callable<Long>() {
       @Override

+ 14 - 51
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator.java

@@ -66,7 +66,6 @@ public class HttpFSKerberosAuthenticator extends KerberosAuthenticator {
   public static final String RENEWER_PARAM = "renewer";
   public static final String TOKEN_KIND = "HTTPFS_DELEGATION_TOKEN";
   public static final String DELEGATION_TOKEN_JSON = "Token";
-  public static final String DELEGATION_TOKENS_JSON = "Tokens";
   public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
   public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
 
@@ -76,7 +75,6 @@ public class HttpFSKerberosAuthenticator extends KerberosAuthenticator {
   @InterfaceAudience.Private
   public static enum DelegationTokenOperation {
     GETDELEGATIONTOKEN(HTTP_GET, true),
-    GETDELEGATIONTOKENS(HTTP_GET, true),
     RENEWDELEGATIONTOKEN(HTTP_PUT, true),
     CANCELDELEGATIONTOKEN(HTTP_PUT, false);
 
@@ -121,10 +119,11 @@ public class HttpFSKerberosAuthenticator extends KerberosAuthenticator {
 
   public static final String OP_PARAM = "op";
 
-  private static List<Token<?>> getDelegationTokens(URI fsURI,
-    InetSocketAddress httpFSAddr, DelegationTokenOperation op,
-    AuthenticatedURL.Token token, String renewer)
-    throws IOException {
+  public static Token<?> getDelegationToken(URI fsURI,
+    InetSocketAddress httpFSAddr, AuthenticatedURL.Token token,
+    String renewer) throws IOException {
+    DelegationTokenOperation op = 
+      DelegationTokenOperation.GETDELEGATIONTOKEN;
     Map<String, String> params = new HashMap<String, String>();
     params.put(OP_PARAM, op.toString());
     params.put(RENEWER_PARAM,renewer);
@@ -135,56 +134,20 @@ public class HttpFSKerberosAuthenticator extends KerberosAuthenticator {
       HttpURLConnection conn = aUrl.openConnection(url, token);
       conn.setRequestMethod(op.getHttpMethod());
       HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
-      List<String> list = new ArrayList<String>();
-      if (op == DelegationTokenOperation.GETDELEGATIONTOKEN) {
-        JSONObject json = (JSONObject) ((JSONObject)
-          HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
-        String tokenStr = (String)
-          json.get(DELEGATION_TOKEN_URL_STRING_JSON);
-        list.add(tokenStr);
-      }
-      else if (op == DelegationTokenOperation.GETDELEGATIONTOKENS) {
-        JSONObject json = (JSONObject) ((JSONObject)
-          HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKENS_JSON);
-        JSONArray array = (JSONArray) json.get(DELEGATION_TOKEN_JSON);
-        for (Object element : array) {
-          String tokenStr = (String)
-            ((Map) element).get(DELEGATION_TOKEN_URL_STRING_JSON);
-          list.add(tokenStr);
-        }
-
-      } else {
-        throw new IllegalArgumentException("Invalid operation: " +
-                                           op.toString());
-      }
-      List<Token<?>> dTokens = new ArrayList<Token<?>>();
-      for (String tokenStr : list) {
-        Token<AbstractDelegationTokenIdentifier> dToken =
-          new Token<AbstractDelegationTokenIdentifier>();
-        dToken.decodeFromUrlString(tokenStr);
-        dTokens.add(dToken);
-        SecurityUtil.setTokenService(dToken, httpFSAddr);
-      }
-      return dTokens;
+      JSONObject json = (JSONObject) ((JSONObject)
+        HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
+      String tokenStr = (String)
+        json.get(DELEGATION_TOKEN_URL_STRING_JSON);
+      Token<AbstractDelegationTokenIdentifier> dToken =
+        new Token<AbstractDelegationTokenIdentifier>();
+      dToken.decodeFromUrlString(tokenStr);
+      SecurityUtil.setTokenService(dToken, httpFSAddr);
+      return dToken;
     } catch (AuthenticationException ex) {
       throw new IOException(ex.toString(), ex);
     }
   }
 
-  public static List<Token<?>> getDelegationTokens(URI fsURI,
-    InetSocketAddress httpFSAddr, AuthenticatedURL.Token token,
-    String renewer) throws IOException {
-    return getDelegationTokens(fsURI, httpFSAddr,
-      DelegationTokenOperation.GETDELEGATIONTOKENS, token, renewer);
-  }
-
-  public static Token<?> getDelegationToken(URI fsURI,
-    InetSocketAddress httpFSAddr, AuthenticatedURL.Token token,
-    String renewer) throws IOException {
-    return getDelegationTokens(fsURI, httpFSAddr,
-      DelegationTokenOperation.GETDELEGATIONTOKENS, token, renewer).get(0);
-  }
-
   public static long renewDelegationToken(URI fsURI,
     AuthenticatedURL.Token token, Token<?> dToken) throws IOException {
     Map<String, String> params = new HashMap<String, String>();

+ 1 - 25
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandler.java

@@ -63,8 +63,6 @@ public class HttpFSKerberosAuthenticationHandler
   static {
     DELEGATION_TOKEN_OPS.add(
       DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
-    DELEGATION_TOKEN_OPS.add(
-      DelegationTokenOperation.GETDELEGATIONTOKENS.toString());
     DELEGATION_TOKEN_OPS.add(
       DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
     DELEGATION_TOKEN_OPS.add(
@@ -111,7 +109,6 @@ public class HttpFSKerberosAuthenticationHandler
             Map map = null;
             switch (dtOp) {
               case GETDELEGATIONTOKEN:
-              case GETDELEGATIONTOKENS:
                 String renewerParam =
                   request.getParameter(HttpFSKerberosAuthenticator.RENEWER_PARAM);
                 if (renewerParam == null) {
@@ -119,11 +116,7 @@ public class HttpFSKerberosAuthenticationHandler
                 }
                 Token<?> dToken = tokenManager.createToken(
                   UserGroupInformation.getCurrentUser(), renewerParam);
-                if (dtOp == DelegationTokenOperation.GETDELEGATIONTOKEN) {
-                  map = delegationTokenToJSON(dToken);
-                } else {
-                  map = delegationTokensToJSON(Arrays.asList((Token)dToken));
-                }
+                map = delegationTokenToJSON(dToken);
                 break;
               case RENEWDELEGATIONTOKEN:
               case CANCELDELEGATIONTOKEN:
@@ -191,23 +184,6 @@ public class HttpFSKerberosAuthenticationHandler
     return response;
   }
   
-  @SuppressWarnings("unchecked")
-  private static Map delegationTokensToJSON(List<Token> tokens)
-    throws IOException {
-    List list = new ArrayList();
-    for (Token token : tokens) {
-      Map map = new HashMap();
-      map.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
-              token.encodeToUrlString());
-      list.add(map);
-    }
-    Map map = new HashMap();
-    map.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON, list);
-    Map response = new LinkedHashMap();
-    response.put(HttpFSKerberosAuthenticator.DELEGATION_TOKENS_JSON, map);
-    return response;
-  }
-
   /**
    * Authenticates a request looking for the <code>delegation</code>
    * query-string parameter and verifying it is a valid token. If there is not

+ 6 - 21
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java

@@ -68,10 +68,8 @@ public class TestHttpFSKerberosAuthenticationHandler extends HFSTestCase {
 
       testNonManagementOperation(handler);
       testManagementOperationErrors(handler);
-      testGetToken(handler, false, null);
-      testGetToken(handler, true, null);
-      testGetToken(handler, false, "foo");
-      testGetToken(handler, true, "foo");
+      testGetToken(handler, null);
+      testGetToken(handler, "foo");
      testCancelToken(handler);
      testRenewToken(handler);
 
@@ -115,12 +113,9 @@ public class TestHttpFSKerberosAuthenticationHandler extends HFSTestCase {
      Mockito.contains("requires SPNEGO"));
   }
 
-  private void testGetToken(AuthenticationHandler handler, boolean tokens,
-                            String renewer)
+  private void testGetToken(AuthenticationHandler handler, String renewer)
     throws Exception {
-    DelegationTokenOperation op =
-      (tokens) ? DelegationTokenOperation.GETDELEGATIONTOKENS
-               : DelegationTokenOperation.GETDELEGATIONTOKEN;
+    DelegationTokenOperation op = DelegationTokenOperation.GETDELEGATIONTOKEN;
     HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
     HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
     Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
@@ -148,23 +143,13 @@ public class TestHttpFSKerberosAuthenticationHandler extends HFSTestCase {
     Mockito.verify(response).setContentType(MediaType.APPLICATION_JSON);
     pwriter.close();
     String responseOutput = writer.toString();
-    String tokenLabel = (tokens)
-                        ? HttpFSKerberosAuthenticator.DELEGATION_TOKENS_JSON
-                        : HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON;
-    if (tokens) {
-      Assert.assertTrue(responseOutput.contains(tokenLabel));
-    } else {
-      Assert.assertTrue(responseOutput.contains(tokenLabel));
-    }
+    String tokenLabel = HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON;
+    Assert.assertTrue(responseOutput.contains(tokenLabel));
     Assert.assertTrue(responseOutput.contains(
       HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON));
     JSONObject json = (JSONObject) new JSONParser().parse(responseOutput);
     json = (JSONObject) json.get(tokenLabel);
     String tokenStr;
-    if (tokens) {
-      json = (JSONObject) ((JSONArray)
-        json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON)).get(0);
-    }
     tokenStr = (String)
       json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
     Token<DelegationTokenIdentifier> dt = new Token<DelegationTokenIdentifier>();

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java

@@ -222,10 +222,11 @@ public class TestHttpFSWithKerberos extends HFSTestCase {
     URI uri = new URI( "webhdfs://" +
                        TestJettyHelper.getJettyURL().toURI().getAuthority());
     FileSystem fs = FileSystem.get(uri, conf);
-    Token<?> token = fs.getDelegationToken("foo");
+    Token<?> tokens[] = fs.addDelegationTokens("foo", null);
     fs.close();
+    Assert.assertEquals(1, tokens.length);
     fs = FileSystem.get(uri, conf);
-    ((DelegationTokenRenewer.Renewable) fs).setDelegationToken(token);
+    ((DelegationTokenRenewer.Renewable) fs).setDelegationToken(tokens[0]);
     fs.listStatus(new Path("/"));
     fs.close();
   }

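Note: this test now goes through FileSystem#addDelegationTokens instead of the removed getDelegationTokens variant. A minimal sketch of the client-side pattern, assuming the addDelegationTokens(String renewer, Credentials) signature implied by the call above; passing a Credentials object lets a caller gather tokens from several file systems, and tokens already present in it are not fetched again:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    public class DelegationTokenSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Collect this file system's delegation token(s) into a shared bundle.
        Credentials creds = new Credentials();
        Token<?>[] fetched = fs.addDelegationTokens("renewer-user", creds);
        System.out.println("fetched " + fetched.length + " token(s)");

        // Hand everything that was collected to the current user, mirroring the
        // new Credentials#addTokensToUGI helper tested earlier in this change.
        creds.addTokensToUGI(UserGroupInformation.getCurrentUser());
      }
    }
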
+ 59 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -120,6 +120,12 @@ Trunk (unreleased changes)
     HDFS-3803. Change BlockPoolSliceScanner chatty INFO log to DEBUG.
     (Andrew Purtell via suresh)
 
+    HDFS-3817. Avoid printing SafeModeException stack trace.
+    (Brandon Li via suresh)
+
+    HDFS-3819. Should check whether invalidate work percentage default value is 
+    not greater than 1.0f. (Jing Zhao via jitendra)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -191,6 +197,14 @@ Trunk (unreleased changes)
 
     HDFS-3792. Fix two findbugs introduced by HDFS-3695 (todd)
 
+    HDFS-3827. TestHASafeMode#assertSafemode method should be made static.
+    (Jing Zhao via suresh)
+
+    HDFS-3834. Remove unused static fields NAME, DESCRIPTION and Usage from
+    Command. (Jing Zhao via suresh)
+
+    HDFS-3678. Edit log files are never being purged from 2NN. (atm)
+
 Branch-2 ( Unreleased changes )
 
   INCOMPATIBLE CHANGES
@@ -399,6 +413,22 @@ Branch-2 ( Unreleased changes )
 
     HDFS-3796. Speed up edit log tests by avoiding fsync() (todd)
 
+    HDFS-2963. Console Output is confusing while executing metasave
+    (dfsadmin command). (Andrew Wang via eli)
+
+    HDFS-3672. Expose disk-location information for blocks to enable better
+    scheduling. (Andrew Wang via atm)
+
+    HDFS-2727. libhdfs should get the default block size from the server.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-2686. Remove DistributedUpgrade related code. (suresh)
+
+    HDFS-3832. Remove protocol methods related to DistributedUpgrade. (suresh)
+
+    HDFS-3177. Update DFSClient and DataXceiver to handle different checksum
+    types in file checksum computation.  (Kihwal Lee via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log
@@ -412,6 +442,9 @@ Branch-2 ( Unreleased changes )
 
     HDFS-3697. Enable fadvise readahead by default. (todd)
 
+    HDFS-2421. Improve the concurrency of SerialNumberMap in NameNode.
+    (Jing Zhao and Weiyan Wang via szetszwo)
+
   BUG FIXES
 
     HDFS-3385. The last block of INodeFileUnderConstruction is not
@@ -614,6 +647,32 @@ Branch-2 ( Unreleased changes )
     header when offset is specified and length is omitted.
     (Ravi Prakash via szetszwo)
 
+    HDFS-3048. Small race in BlockManager#close. (Andy Isaacson via eli)
+
+    HDFS-3194. DataNode block scanner is running too frequently.
+    (Andy Isaacson via eli)
+
+    HDFS-3808. fuse_dfs: postpone libhdfs intialization until after fork.
+    (Colin Patrick McCabe via atm)
+
+    HDFS-3788. ByteRangeInputStream should not expect HTTP Content-Length header
+    when chunked transfer-encoding is used.  (szetszwo)
+
+    HDFS-3816. Invalidate work percentage default value should be 0.32f
+    instead of 32. (Jing Zhao via suresh)
+
+    HDFS-3707. TestFSInputChecker: improper use of skip.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3830. test_libhdfs_threaded: use forceNewInstance.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3835. Long-lived 2NN cannot perform a checkpoint if security is
+    enabled and the NN restarts with outstanding delegation tokens. (atm)
+
+    HDFS-3715. Fix TestFileCreation#testFileCreationNamenodeRestart.
+    (Andrew Whang via eli)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)

+ 79 - 25
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -39,18 +39,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   <dependencies>
     <dependency>
-      <groupId>org.aspectj</groupId>
-      <artifactId>aspectjtools</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.aspectj</groupId>
-      <artifactId>aspectjrt</artifactId>
-      <scope>test</scope>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-annotations</artifactId>
+      <artifactId>hadoop-auth</artifactId>
       <scope>provided</scope>
     </dependency>
     <dependency>
@@ -64,6 +59,58 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>test</scope>
       <type>test-jar</type>
     </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+      <version>3.4.2</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty-util</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-core</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-server</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-cli</groupId>
+      <artifactId>commons-cli</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+      <scope>compile</scope>
+    </dependency>
     <dependency>
     <dependency>
       <groupId>commons-logging</groupId>
       <artifactId>commons-logging</artifactId>
@@ -74,6 +121,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>commons-daemon</artifactId>
       <scope>compile</scope>
     </dependency>
+      <groupId>javax.servlet.jsp</groupId>
+      <artifactId>jsp-api</artifactId>
+      <scope>compile</scope>
+    </dependency>
     <dependency>
     <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
@@ -85,8 +137,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <artifactId>avro</artifactId>
+      <groupId>javax.servlet</groupId>
+      <artifactId>servlet-api</artifactId>
       <scope>compile</scope>
       <scope>compile</scope>
     </dependency>
     <dependency>
@@ -100,27 +152,29 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>test</scope>
     </dependency>
     <dependency>
-      <artifactId>ant</artifactId>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
       <scope>provided</scope>
       <scope>provided</scope>
     </dependency>
     <dependency>
-      <artifactId>zookeeper</artifactId>
-      <version>3.4.2</version>
-      <scope>provided</scope>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-core-asl</artifactId>
+      <scope>compile</scope>
     </dependency>
     </dependency>
     <dependency>
-      <artifactId>slf4j-log4j12</artifactId>
-      <scope>provided</scope>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-mapper-asl</artifactId>
+      <scope>compile</scope>
     </dependency>
     </dependency>
     <dependency>
-      <artifactId>zookeeper</artifactId>
-      <version>3.4.2</version>
-      <type>test-jar</type>
-      <scope>test</scope>
+      <groupId>tomcat</groupId>
+      <artifactId>jasper-runtime</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>xmlenc</groupId>
+      <artifactId>xmlenc</artifactId>
+      <scope>compile</scope>
     </dependency>
     </dependency>
   </dependencies>
 
+ 13 - 3
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml

@@ -37,9 +37,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   <dependencies>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-annotations</artifactId>
-      <scope>provided</scope>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+      <scope>compile</scope>
     </dependency>
     <dependency> 
       <groupId>org.apache.hadoop</groupId>
@@ -68,6 +68,16 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>bookkeeper-server</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <scope>compile</scope>
+    </dependency>
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>

+ 51 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/BlockStorageLocation.java

@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Wrapper for {@link BlockLocation} that also adds {@link VolumeId} volume
+ * location information for each replica.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Public
+public class BlockStorageLocation extends BlockLocation {
+
+  private final VolumeId[] volumeIds;
+
+  public BlockStorageLocation(BlockLocation loc, VolumeId[] volumeIds)
+      throws IOException {
+    // Initialize with data from passed in BlockLocation
+    super(loc.getNames(), loc.getHosts(), loc.getTopologyPaths(), loc
+        .getOffset(), loc.getLength(), loc.isCorrupt());
+    this.volumeIds = volumeIds;
+  }
+
+  /**
+   * Gets the list of {@link VolumeId} corresponding to the block's replicas.
+   * 
+   * @return volumeIds list of VolumeId for the block's replicas
+   */
+  public VolumeId[] getVolumeIds() {
+    return volumeIds;
+  }
+}
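
Note: BlockStorageLocation is the public return type for the per-replica disk information added by HDFS-3672 (listed in the CHANGES.txt hunk above). A minimal sketch of how a client might consume it, assuming DistributedFileSystem exposes a getFileBlockStorageLocations(List<BlockLocation>) call that augments plain BlockLocations with VolumeIds (that method itself is not part of this excerpt):

    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.BlockStorageLocation;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.VolumeId;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class BlockStorageLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path file = new Path("/user/example/data.bin");  // hypothetical path

        FileSystem fs = FileSystem.get(conf);
        BlockLocation[] blocks =
            fs.getFileBlockLocations(fs.getFileStatus(file), 0, Long.MAX_VALUE);

        // Assumed API: ask HDFS to attach per-replica VolumeIds to the blocks.
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        BlockStorageLocation[] locations =
            dfs.getFileBlockStorageLocations(Arrays.asList(blocks));

        for (BlockStorageLocation loc : locations) {
          String[] hosts = loc.getHosts();
          VolumeId[] volumes = loc.getVolumeIds();
          for (int i = 0; i < hosts.length; i++) {
            // An invalid VolumeId means the datanode could not report the disk.
            System.out.println(hosts[i] + " -> "
                + (volumes[i].isValid() ? volumes[i] : "unknown volume"));
          }
        }
      }
    }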

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -93,10 +94,10 @@ public class Hdfs extends AbstractFileSystem {
   public HdfsDataOutputStream createInternal(Path f,
       EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
       int bufferSize, short replication, long blockSize, Progressable progress,
-      int bytesPerChecksum, boolean createParent) throws IOException {
+      ChecksumOpt checksumOpt, boolean createParent) throws IOException {
     return new HdfsDataOutputStream(dfs.primitiveCreate(getUriPath(f),
         absolutePermission, createFlag, createParent, replication, blockSize,
-        progress, bufferSize, bytesPerChecksum), getStatistics());
+        progress, bufferSize, checksumOpt), getStatistics());
   }
 
   @Override

+ 48 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java

@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+
+/**
+ * Wrapper for {@link BlockLocation} that also includes a {@link LocatedBlock},
+ * allowing more detailed queries to the datanode about a block.
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class HdfsBlockLocation extends BlockLocation {
+
+  private final LocatedBlock block;
+  
+  public HdfsBlockLocation(BlockLocation loc, LocatedBlock block) 
+      throws IOException {
+    // Initialize with data from passed in BlockLocation
+    super(loc.getNames(), loc.getHosts(), loc.getTopologyPaths(), 
+        loc.getOffset(), loc.getLength(), loc.isCorrupt());
+    this.block = block;
+  }
+  
+  public LocatedBlock getLocatedBlock() {
+    return block;
+  }
+}

+ 74 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java

@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * HDFS-specific volume identifier which implements {@link VolumeId}. Can be
+ * used to differentiate between the data directories on a single datanode. This
+ * identifier is only unique on a per-datanode basis.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Public
+public class HdfsVolumeId implements VolumeId {
+
+  private final byte id;
+  private final boolean isValid;
+
+  public HdfsVolumeId(byte id, boolean isValid) {
+    this.id = id;
+    this.isValid = isValid;
+  }
+
+  @Override
+  public boolean isValid() {
+    return isValid;
+  }
+
+  @Override
+  public int compareTo(VolumeId arg0) {
+    return hashCode() - arg0.hashCode();
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder().append(id).toHashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null || obj.getClass() != getClass()) {
+      return false;
+    }
+    if (obj == this) {
+      return true;
+    }
+
+    HdfsVolumeId that = (HdfsVolumeId) obj;
+    return new EqualsBuilder().append(this.id, that.id).isEquals();
+  }
+
+  @Override
+  public String toString() {
+    return Byte.toString(id);
+  }
+}

+ 49 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java

@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Opaque interface that identifies a disk location. Subclasses
+ * should implement {@link Comparable} and override both equals and hashCode.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Public
+public interface VolumeId extends Comparable<VolumeId> {
+
+  /**
+   * Indicates if the disk identifier is valid. Invalid identifiers indicate
+   * that the block was not present, or the location could otherwise not be
+   * determined.
+   * 
+   * @return true if the disk identifier is valid
+   */
+  public boolean isValid();
+
+  @Override
+  abstract public int compareTo(VolumeId arg0);
+
+  @Override
+  abstract public int hashCode();
+
+  @Override
+  abstract public boolean equals(Object obj);
+
+}

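Editor's note: a minimal usage sketch for the two classes above (not part of the patch). The byte values and the grouping logic are hypothetical; in practice the ids come back from the datanode queries added later in this change, and invalid ids must be filtered with isValid() before they are compared or hashed.

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.fs.HdfsVolumeId;
    import org.apache.hadoop.fs.VolumeId;

    public class VolumeIdSketch {
      public static void main(String[] args) {
        // Hypothetical per-replica ids; a real client receives these from
        // the storage-location APIs introduced elsewhere in this change.
        VolumeId v0 = new HdfsVolumeId((byte) 0, true);
        VolumeId v1 = new HdfsVolumeId((byte) 1, true);
        VolumeId unknown = new HdfsVolumeId((byte) -1, false);

        // Count distinct volumes, skipping ids the datanode could not resolve.
        Set<VolumeId> distinctVolumes = new HashSet<VolumeId>();
        for (VolumeId id : new VolumeId[] { v0, v1, unknown }) {
          if (id.isValid()) {
            distinctVolumes.add(id);  // relies on the equals()/hashCode() contract above
          }
        }
        System.out.println("replicas on " + distinctVolumes.size() + " volumes");
      }
    }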
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java

@@ -290,7 +290,7 @@ class BlockReaderLocal implements BlockReader {
      long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn)
      throws IOException {
    this(conf, hdfsfile, block, token, startOffset, length, pathinfo,
-        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_NULL, 4), false,
+        DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4), false,
        dataIn, startOffset, null);
  }
 

+ 337 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java

@@ -0,0 +1,337 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BlockStorageLocation;
+import org.apache.hadoop.fs.HdfsVolumeId;
+import org.apache.hadoop.fs.VolumeId;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.security.token.Token;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+class BlockStorageLocationUtil {
+  
+  private static final Log LOG = LogFactory
+      .getLog(BlockStorageLocationUtil.class);
+  
+  /**
+   * Create a list of {@link VolumeBlockLocationCallable} corresponding to a set
+   * of datanodes and blocks.
+   * 
+   * @param datanodeBlocks
+   *          Map of datanodes to block replicas at each datanode
+   * @return callables Used to query each datanode for location information on
+   *         the block replicas at the datanode
+   */
+  private static List<VolumeBlockLocationCallable> createVolumeBlockLocationCallables(
+      Configuration conf, Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks,
+      int timeout, boolean connectToDnViaHostname) {
+    // Construct the callables, one per datanode
+    List<VolumeBlockLocationCallable> callables = 
+        new ArrayList<VolumeBlockLocationCallable>();
+    for (Map.Entry<DatanodeInfo, List<LocatedBlock>> entry : datanodeBlocks
+        .entrySet()) {
+      // Construct RPC parameters
+      DatanodeInfo datanode = entry.getKey();
+      List<LocatedBlock> locatedBlocks = entry.getValue();
+      List<ExtendedBlock> extendedBlocks = 
+          new ArrayList<ExtendedBlock>(locatedBlocks.size());
+      List<Token<BlockTokenIdentifier>> dnTokens = 
+          new ArrayList<Token<BlockTokenIdentifier>>(
+          locatedBlocks.size());
+      for (LocatedBlock b : locatedBlocks) {
+        extendedBlocks.add(b.getBlock());
+        dnTokens.add(b.getBlockToken());
+      }
+      VolumeBlockLocationCallable callable = new VolumeBlockLocationCallable(
+          conf, datanode, extendedBlocks, dnTokens, timeout, 
+          connectToDnViaHostname);
+      callables.add(callable);
+    }
+    return callables;
+  }
+  
+  /**
+   * Queries datanodes for the blocks specified in <code>datanodeBlocks</code>,
+   * making one RPC to each datanode. These RPCs are made in parallel using a
+   * threadpool.
+   * 
+   * @param datanodeBlocks
+   *          Map of datanodes to the blocks present on the DN
+   * @return metadatas List of block metadata for each datanode, specifying
+   *         volume locations for each block
+   * @throws InvalidBlockTokenException
+   *           if client does not have read access on a requested block
+   */
+  static List<HdfsBlocksMetadata> queryDatanodesForHdfsBlocksMetadata(
+      Configuration conf, Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks,
+      int poolsize, int timeout, boolean connectToDnViaHostname)
+      throws InvalidBlockTokenException {
+
+    List<VolumeBlockLocationCallable> callables = 
+        createVolumeBlockLocationCallables(conf, datanodeBlocks, timeout, 
+            connectToDnViaHostname);
+    
+    // Use a thread pool to execute the Callables in parallel
+    List<Future<HdfsBlocksMetadata>> futures = 
+        new ArrayList<Future<HdfsBlocksMetadata>>();
+    ExecutorService executor = new ScheduledThreadPoolExecutor(poolsize);
+    try {
+      futures = executor.invokeAll(callables, timeout, TimeUnit.SECONDS);
+    } catch (InterruptedException e) {
+      // Swallow the exception here, because we can return partial results
+    }
+    executor.shutdown();
+    
+    // Initialize metadatas list with nulls
+    // This is used to later indicate if we didn't get a response from a DN
+    List<HdfsBlocksMetadata> metadatas = new ArrayList<HdfsBlocksMetadata>();
+    for (int i = 0; i < futures.size(); i++) {
+      metadatas.add(null);
+    }
+    // Fill in metadatas with results from DN RPCs, where possible
+    for (int i = 0; i < futures.size(); i++) {
+      Future<HdfsBlocksMetadata> future = futures.get(i);
+      try {
+        HdfsBlocksMetadata metadata = future.get();
+        metadatas.set(i, metadata);
+      } catch (ExecutionException e) {
+        VolumeBlockLocationCallable callable = callables.get(i);
+        DatanodeInfo datanode = callable.getDatanodeInfo();
+        Throwable t = e.getCause();
+        if (t instanceof InvalidBlockTokenException) {
+          LOG.warn("Invalid access token when trying to retrieve "
+              + "information from datanode " + datanode.getIpcAddr(false));
+          throw (InvalidBlockTokenException) t;
+        }
+        else if (t instanceof UnsupportedOperationException) {
+          LOG.info("Datanode " + datanode.getIpcAddr(false) + " does not support"
+              + " required #getHdfsBlocksMetadata() API");
+          throw (UnsupportedOperationException) t;
+        } else {
+          LOG.info("Failed to connect to datanode " +
+              datanode.getIpcAddr(false));
+        }
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Could not fetch information from datanode", t);
+        }
+      } catch (InterruptedException e) {
+        // Shouldn't happen, because invokeAll waits for all Futures to be ready
+        LOG.info("Interrupted while fetching HdfsBlocksMetadata");
+      }
+    }
+    
+    return metadatas;
+  }
+  
+  /**
+   * Group the per-replica {@link VolumeId} info returned from
+   * {@link DFSClient#queryDatanodesForHdfsBlocksMetadata(Map)} to be associated
+   * with the corresponding {@link LocatedBlock}.
+   * 
+   * @param blocks
+   *          Original LocatedBlock array
+   * @param datanodeBlocks
+   *          Mapping from datanodes to the list of replicas on each datanode
+   * @param metadatas
+   *          VolumeId information for the replicas on each datanode
+   * @return blockVolumeIds per-replica VolumeId information associated with the
+   *         parent LocatedBlock
+   */
+  static Map<LocatedBlock, List<VolumeId>> associateVolumeIdsWithBlocks(
+      List<LocatedBlock> blocks, Map<DatanodeInfo, 
+      List<LocatedBlock>> datanodeBlocks, List<HdfsBlocksMetadata> metadatas) {
+    
+    // Initialize mapping of ExtendedBlock to LocatedBlock. 
+    // Used to associate results from DN RPCs to the parent LocatedBlock
+    Map<ExtendedBlock, LocatedBlock> extBlockToLocBlock = 
+        new HashMap<ExtendedBlock, LocatedBlock>();
+    for (LocatedBlock b : blocks) {
+      extBlockToLocBlock.put(b.getBlock(), b);
+    }
+    
+    // Initialize the mapping of blocks -> list of VolumeIds, one per replica
+    // This is filled out with real values from the DN RPCs
+    Map<LocatedBlock, List<VolumeId>> blockVolumeIds = 
+        new HashMap<LocatedBlock, List<VolumeId>>();
+    for (LocatedBlock b : blocks) {
+      ArrayList<VolumeId> l = new ArrayList<VolumeId>(b.getLocations().length);
+      // Start off all IDs as invalid, fill it in later with results from RPCs
+      for (int i = 0; i < b.getLocations().length; i++) {
+        l.add(new HdfsVolumeId((byte)-1, false));
+      }
+      blockVolumeIds.put(b, l);
+    }
+    
+    // Iterate through the list of metadatas (one per datanode). 
+    // For each metadata, if it's valid, insert its volume location information 
+    // into the Map returned to the caller 
+    Iterator<HdfsBlocksMetadata> metadatasIter = metadatas.iterator();
+    Iterator<DatanodeInfo> datanodeIter = datanodeBlocks.keySet().iterator();
+    while (metadatasIter.hasNext()) {
+      HdfsBlocksMetadata metadata = metadatasIter.next();
+      DatanodeInfo datanode = datanodeIter.next();
+      // Check if metadata is valid
+      if (metadata == null) {
+        continue;
+      }
+      ExtendedBlock[] metaBlocks = metadata.getBlocks();
+      List<byte[]> metaVolumeIds = metadata.getVolumeIds();
+      List<Integer> metaVolumeIndexes = metadata.getVolumeIndexes();
+      // Add VolumeId for each replica in the HdfsBlocksMetadata
+      for (int j = 0; j < metaBlocks.length; j++) {
+        int volumeIndex = metaVolumeIndexes.get(j);
+        ExtendedBlock extBlock = metaBlocks[j];
+        // Skip if block wasn't found, or not a valid index into metaVolumeIds
+        // Also skip if the DN responded with a block we didn't ask for
+        if (volumeIndex == Integer.MAX_VALUE
+            || volumeIndex >= metaVolumeIds.size()
+            || !extBlockToLocBlock.containsKey(extBlock)) {
+          continue;
+        }
+        // Get the VolumeId by indexing into the list of VolumeIds
+        // provided by the datanode
+        HdfsVolumeId id = new HdfsVolumeId(metaVolumeIds.get(volumeIndex)[0],
+            true);
+        // Find out which index we are in the LocatedBlock's replicas
+        LocatedBlock locBlock = extBlockToLocBlock.get(extBlock);
+        DatanodeInfo[] dnInfos = locBlock.getLocations();
+        int index = -1;
+        for (int k = 0; k < dnInfos.length; k++) {
+          if (dnInfos[k].equals(datanode)) {
+            index = k;
+            break;
+          }
+        }
+        if (index < 0) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Datanode responded with a block volume id we did" +
+                " not request, omitting.");
+          }
+          continue;
+        }
+        // Place VolumeId at the same index as the DN's index in the list of
+        // replicas
+        List<VolumeId> VolumeIds = blockVolumeIds.get(locBlock);
+        VolumeIds.set(index, id);
+      }
+    }
+    return blockVolumeIds;
+  }
+
+  /**
+   * Helper method to combine a list of {@link LocatedBlock} with associated
+   * {@link VolumeId} information to form a list of {@link BlockStorageLocation}
+   * .
+   */
+  static BlockStorageLocation[] convertToVolumeBlockLocations(
+      List<LocatedBlock> blocks, 
+      Map<LocatedBlock, List<VolumeId>> blockVolumeIds) throws IOException {
+    // Construct the final return value of VolumeBlockLocation[]
+    BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
+    List<BlockStorageLocation> volumeBlockLocs = 
+        new ArrayList<BlockStorageLocation>(locations.length);
+    for (int i = 0; i < locations.length; i++) {
+      LocatedBlock locBlock = blocks.get(i);
+      List<VolumeId> volumeIds = blockVolumeIds.get(locBlock);
+      BlockStorageLocation bsLoc = new BlockStorageLocation(locations[i], 
+          volumeIds.toArray(new VolumeId[0]));
+      volumeBlockLocs.add(bsLoc);
+    }
+    return volumeBlockLocs.toArray(new BlockStorageLocation[] {});
+  }
+  
+  /**
+   * Callable that sets up an RPC proxy to a datanode and queries it for
+   * volume location information for a list of ExtendedBlocks. 
+   */
+  private static class VolumeBlockLocationCallable implements 
+    Callable<HdfsBlocksMetadata> {
+    
+    private Configuration configuration;
+    private int timeout;
+    private DatanodeInfo datanode;
+    private List<ExtendedBlock> extendedBlocks;
+    private List<Token<BlockTokenIdentifier>> dnTokens;
+    private boolean connectToDnViaHostname;
+    
+    VolumeBlockLocationCallable(Configuration configuration,
+        DatanodeInfo datanode, List<ExtendedBlock> extendedBlocks,
+        List<Token<BlockTokenIdentifier>> dnTokens, int timeout, 
+        boolean connectToDnViaHostname) {
+      this.configuration = configuration;
+      this.timeout = timeout;
+      this.datanode = datanode;
+      this.extendedBlocks = extendedBlocks;
+      this.dnTokens = dnTokens;
+      this.connectToDnViaHostname = connectToDnViaHostname;
+    }
+    
+    public DatanodeInfo getDatanodeInfo() {
+      return datanode;
+    }
+
+    @Override
+    public HdfsBlocksMetadata call() throws Exception {
+      HdfsBlocksMetadata metadata = null;
+      // Create the RPC proxy and make the RPC
+      ClientDatanodeProtocol cdp = null;
+      try {
+        cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
+            timeout, connectToDnViaHostname);
+        metadata = cdp.getHdfsBlocksMetadata(extendedBlocks, dnTokens);
+      } catch (IOException e) {
+        // Bubble this up to the caller, handle with the Future
+        throw e;
+      } finally {
+        if (cdp != null) {
+          RPC.stopProxy(cdp);
+        }
+      }
+      return metadata;
+    }
+  }
+}

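Editor's note: the utility above fans the per-datanode RPCs out with ExecutorService.invokeAll() under a timeout, so one slow or unreachable datanode cannot stall the whole query, and futures that fail or get cancelled simply leave a null slot in the result list. A stripped-down, Hadoop-free sketch of that pattern (all class and task names hypothetical):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    public class TimedFanOutSketch {
      public static void main(String[] args) throws InterruptedException {
        List<Callable<String>> calls = new ArrayList<Callable<String>>();
        for (int i = 0; i < 4; i++) {
          final int id = i;
          calls.add(new Callable<String>() {
            @Override
            public String call() throws Exception {
              Thread.sleep(id * 400L);            // pretend RPC latency
              return "reply-" + id;
            }
          });
        }

        ExecutorService pool = Executors.newFixedThreadPool(2);
        // Bounded wait: futures that miss the deadline come back cancelled.
        List<Future<String>> futures = pool.invokeAll(calls, 1, TimeUnit.SECONDS);
        pool.shutdown();

        List<String> results = new ArrayList<String>();
        for (Future<String> f : futures) {
          try {
            results.add(f.isCancelled() ? null : f.get());  // null = no response
          } catch (ExecutionException e) {
            results.add(null);                              // RPC-style failure
          }
        }
        System.out.println(results);
      }
    }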
+ 51 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java

@@ -22,12 +22,15 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.util.List;
+import java.util.Map;
+import java.util.StringTokenizer;
 
 import org.apache.commons.io.input.BoundedInputStream;
 import org.apache.hadoop.fs.FSInputStream;
-import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.net.HttpHeaders;
 
 /**
  * To support HTTP byte streams, a new connection to an HTTP server needs to be
@@ -70,7 +73,7 @@ public abstract class ByteRangeInputStream extends FSInputStream {
   protected URLOpener resolvedURL;
   protected long startPos = 0;
   protected long currentPos = 0;
-  protected long filelength;
+  protected Long fileLength = null;
 
   StreamStatus status = StreamStatus.SEEK;
 
@@ -114,28 +117,60 @@ public abstract class ByteRangeInputStream extends FSInputStream {
     final URLOpener opener = resolved? resolvedURL: originalURL;
 
     final HttpURLConnection connection = opener.connect(startPos, resolved);
-    final String cl = connection.getHeaderField(StreamFile.CONTENT_LENGTH);
-    if (cl == null) {
-      throw new IOException(StreamFile.CONTENT_LENGTH+" header is missing");
+    resolvedURL.setURL(getResolvedUrl(connection));
+
+    InputStream in = connection.getInputStream();
+    final Map<String, List<String>> headers = connection.getHeaderFields();
+    if (isChunkedTransferEncoding(headers)) {
+      // file length is not known
+      fileLength = null;
+    } else {
+      // for non-chunked transfer-encoding, get content-length
+      final String cl = connection.getHeaderField(HttpHeaders.CONTENT_LENGTH);
+      if (cl == null) {
+        throw new IOException(HttpHeaders.CONTENT_LENGTH + " is missing: "
+            + headers);
+      }
+      final long streamlength = Long.parseLong(cl);
+      fileLength = startPos + streamlength;
+
+      // Java has a bug with >2GB request streams.  It won't bounds check
+      // the reads so the transfer blocks until the server times out
+      in = new BoundedInputStream(in, streamlength);
     }
-    final long streamlength = Long.parseLong(cl);
-    filelength = startPos + streamlength;
-    // Java has a bug with >2GB request streams.  It won't bounds check
-    // the reads so the transfer blocks until the server times out
-    InputStream is =
-        new BoundedInputStream(connection.getInputStream(), streamlength);
 
-    resolvedURL.setURL(getResolvedUrl(connection));
-    
-    return is;
+    return in;
   }
   
+  private static boolean isChunkedTransferEncoding(
+      final Map<String, List<String>> headers) {
+    return contains(headers, HttpHeaders.TRANSFER_ENCODING, "chunked")
+        || contains(headers, HttpHeaders.TE, "chunked");
+  }
+
+  /** Does the HTTP header map contain the given key, value pair? */
+  private static boolean contains(final Map<String, List<String>> headers,
+      final String key, final String value) {
+    final List<String> values = headers.get(key);
+    if (values != null) {
+      for(String v : values) {
+        for(final StringTokenizer t = new StringTokenizer(v, ",");
+            t.hasMoreTokens(); ) {
+          if (value.equalsIgnoreCase(t.nextToken())) {
+            return true;
+          }
+        }
+      }
+    }
+    return false;
+  }
+
   private int update(final int n) throws IOException {
     if (n != -1) {
       currentPos += n;
-    } else if (currentPos < filelength) {
+    } else if (fileLength != null && currentPos < fileLength) {
       throw new IOException("Got EOF but currentPos = " + currentPos
-          + " < filelength = " + filelength);
+          + " < filelength = " + fileLength);
     }
     return n;
   }

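Editor's note: the change above makes the stream length optional: a chunked Transfer-Encoding response carries no usable Content-Length, so fileLength stays null and only the premature-EOF check is skipped. A self-contained sketch of the same header inspection on HttpURLConnection (the URL is hypothetical):

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.util.List;
    import java.util.Map;

    public class ContentLengthSketch {
      public static void main(String[] args) throws IOException {
        // Hypothetical endpoint; any HTTP URL exercises the header logic.
        URL url = new URL("http://localhost:50075/streamFile/foo");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();

        Map<String, List<String>> headers = conn.getHeaderFields();
        List<String> te = headers.get("Transfer-Encoding");
        boolean chunked =
            te != null && te.toString().toLowerCase().contains("chunked");

        Long length = null;                     // unknown until proven otherwise
        if (!chunked) {
          String cl = conn.getHeaderField("Content-Length");
          if (cl == null) {
            throw new IOException("Content-Length is missing: " + headers);
          }
          length = Long.parseLong(cl);
        }
        System.out.println("chunked=" + chunked + ", length=" + length);
      }
    }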
+ 174 - 48
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -45,8 +45,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKRE
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
@@ -69,6 +67,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
@@ -80,6 +79,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BlockStorageLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -87,12 +87,17 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.HdfsBlockLocation;
 import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
 import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -102,10 +107,10 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -120,11 +125,11 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.DataOutputBuffer;
@@ -199,8 +204,7 @@ public class DFSClient implements java.io.Closeable {
     final int maxBlockAcquireFailures;
     final int confTime;
     final int ioBufferSize;
-    final int checksumType;
-    final int bytesPerChecksum;
+    final ChecksumOpt defaultChecksumOpt;
     final int writePacketSize;
     final int socketTimeout;
     final int socketCacheCapacity;
@@ -216,6 +220,9 @@ public class DFSClient implements java.io.Closeable {
     final FsPermission uMask;
     final boolean useLegacyBlockReader;
     final boolean connectToDnViaHostname;
+    final boolean getHdfsBlocksMetadataEnabled;
+    final int getFileBlockStorageLocationsNumThreads;
+    final int getFileBlockStorageLocationsTimeout;
 
     Conf(Configuration conf) {
       maxFailoverAttempts = conf.getInt(
@@ -236,9 +243,7 @@ public class DFSClient implements java.io.Closeable {
       ioBufferSize = conf.getInt(
           CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
           CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
-      checksumType = getChecksumType(conf);
-      bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
-          DFS_BYTES_PER_CHECKSUM_DEFAULT);
+      defaultChecksumOpt = getChecksumOptFromConf(conf);
       socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
           HdfsServerConstants.READ_TIMEOUT);
       /** dfs.write.packet.size is an internal config variable */
@@ -268,26 +273,57 @@ public class DFSClient implements java.io.Closeable {
           DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
       connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
           DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
-    }
-
-    private int getChecksumType(Configuration conf) {
-      String checksum = conf.get(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
+      getHdfsBlocksMetadataEnabled = conf.getBoolean(
+          DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, 
+          DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
+      getFileBlockStorageLocationsNumThreads = conf.getInt(
+          DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS,
+          DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT);
+      getFileBlockStorageLocationsTimeout = conf.getInt(
+          DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT,
+          DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_DEFAULT);
+    }
+
+    private DataChecksum.Type getChecksumType(Configuration conf) {
+      final String checksum = conf.get(
+          DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
           DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
-      if ("CRC32".equals(checksum)) {
-        return DataChecksum.CHECKSUM_CRC32;
-      } else if ("CRC32C".equals(checksum)) {
-        return DataChecksum.CHECKSUM_CRC32C;
-      } else if ("NULL".equals(checksum)) {
-        return DataChecksum.CHECKSUM_NULL;
-      } else {
-        LOG.warn("Bad checksum type: " + checksum + ". Using default.");
-        return DataChecksum.CHECKSUM_CRC32C;
+      try {
+        return DataChecksum.Type.valueOf(checksum);
+      } catch(IllegalArgumentException iae) {
+        LOG.warn("Bad checksum type: " + checksum + ". Using default "
+            + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
+        return DataChecksum.Type.valueOf(
+            DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT); 
       }
     }
 
-    private DataChecksum createChecksum() {
-      return DataChecksum.newDataChecksum(
-          checksumType, bytesPerChecksum);
+    // Construct a checksum option from conf
+    private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
+      DataChecksum.Type type = getChecksumType(conf);
+      int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
+          DFS_BYTES_PER_CHECKSUM_DEFAULT);
+      return new ChecksumOpt(type, bytesPerChecksum);
+    }
+
+    // create a DataChecksum with the default option.
+    private DataChecksum createChecksum() throws IOException {
+      return createChecksum(null);
+    }
+
+    private DataChecksum createChecksum(ChecksumOpt userOpt) 
+        throws IOException {
+      // Fill in any missing field with the default.
+      ChecksumOpt myOpt = ChecksumOpt.processChecksumOpt(
+          defaultChecksumOpt, userOpt);
+      DataChecksum dataChecksum = DataChecksum.newDataChecksum(
+          myOpt.getChecksumType(),
+          myOpt.getBytesPerChecksum());
+      if (dataChecksum == null) {
+        throw new IOException("Invalid checksum type specified: "
+            + myOpt.getChecksumType().name());
+      }
+      return dataChecksum;
     }
   }
  
@@ -943,7 +979,81 @@ public class DFSClient implements java.io.Closeable {
   public BlockLocation[] getBlockLocations(String src, long start, 
     long length) throws IOException, UnresolvedLinkException {
     LocatedBlocks blocks = getLocatedBlocks(src, start, length);
-    return DFSUtil.locatedBlocks2Locations(blocks);
+    BlockLocation[] locations =  DFSUtil.locatedBlocks2Locations(blocks);
+    HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
+    for (int i = 0; i < locations.length; i++) {
+      hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));
+    }
+    return hdfsLocations;
+  }
+  
+  /**
+   * Get block location information about a list of {@link HdfsBlockLocation}.
+   * Used by {@link DistributedFileSystem#getFileBlockStorageLocations(List)} to
+   * get {@link BlockStorageLocation}s for blocks returned by
+   * {@link DistributedFileSystem#getFileBlockLocations(org.apache.hadoop.fs.FileStatus, long, long)}
+   * .
+   * 
+   * This is done by making a round of RPCs to the associated datanodes, asking
+   * the volume of each block replica. The returned array of
+   * {@link BlockStorageLocation} expose this information as a
+   * {@link VolumeId}.
+   * 
+   * @param blockLocations
+   *          target blocks on which to query volume location information
+   * @return volumeBlockLocations original block array augmented with additional
+   *         volume location information for each replica.
+   */
+  public BlockStorageLocation[] getBlockStorageLocations(
+      List<BlockLocation> blockLocations) throws IOException,
+      UnsupportedOperationException, InvalidBlockTokenException {
+    if (!getConf().getHdfsBlocksMetadataEnabled) {
+      throw new UnsupportedOperationException("Datanode-side support for " +
+          "getVolumeBlockLocations() must also be enabled in the client " +
+          "configuration.");
+    }
+    // Downcast blockLocations and fetch out required LocatedBlock(s)
+    List<LocatedBlock> blocks = new ArrayList<LocatedBlock>();
+    for (BlockLocation loc : blockLocations) {
+      if (!(loc instanceof HdfsBlockLocation)) {
+        throw new ClassCastException("DFSClient#getVolumeBlockLocations " +
+            "expected to be passed HdfsBlockLocations");
+      }
+      HdfsBlockLocation hdfsLoc = (HdfsBlockLocation) loc;
+      blocks.add(hdfsLoc.getLocatedBlock());
+    }
+    
+    // Re-group the LocatedBlocks to be grouped by datanodes, with the values
+    // a list of the LocatedBlocks on the datanode.
+    Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks = 
+        new LinkedHashMap<DatanodeInfo, List<LocatedBlock>>();
+    for (LocatedBlock b : blocks) {
+      for (DatanodeInfo info : b.getLocations()) {
+        if (!datanodeBlocks.containsKey(info)) {
+          datanodeBlocks.put(info, new ArrayList<LocatedBlock>());
+        }
+        List<LocatedBlock> l = datanodeBlocks.get(info);
+        l.add(b);
+      }
+    }
+        
+    // Make RPCs to the datanodes to get volume locations for its replicas
+    List<HdfsBlocksMetadata> metadatas = BlockStorageLocationUtil
+        .queryDatanodesForHdfsBlocksMetadata(conf, datanodeBlocks,
+            getConf().getFileBlockStorageLocationsNumThreads,
+            getConf().getFileBlockStorageLocationsTimeout,
+            getConf().connectToDnViaHostname);
+    
+    // Regroup the returned VolumeId metadata to again be grouped by
+    // LocatedBlock rather than by datanode
+    Map<LocatedBlock, List<VolumeId>> blockVolumeIds = BlockStorageLocationUtil
+        .associateVolumeIdsWithBlocks(blocks, datanodeBlocks, metadatas);
+    
+    // Combine original BlockLocations with new VolumeId information
+    BlockStorageLocation[] volumeBlockLocations = BlockStorageLocationUtil
+        .convertToVolumeBlockLocations(blocks, blockVolumeIds);
+
+    return volumeBlockLocations;
   }
   }
   
   public DFSInputStream open(String src) 
     return create(src, FsPermission.getDefault(),
     return create(src, FsPermission.getDefault(),
         overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
             : EnumSet.of(CreateFlag.CREATE), replication, blockSize, progress,
+        buffersize, null);
   }
   }
 
   /**
    * Call {@link #create(String, FsPermission, EnumSet, boolean, short, 
-   * long, Progressable, int)} with <code>createParent</code> set to true.
+   * long, Progressable, int, ChecksumOpt)} with <code>createParent</code>
+   *  set to true.
    */
   public DFSOutputStream create(String src, 
                              FsPermission permission,
@@ -1067,10 +1178,11 @@ public class DFSClient implements java.io.Closeable {
                              short replication,
                              long blockSize,
                              Progressable progress,
-                             int buffersize)
+                             int buffersize,
+                             ChecksumOpt checksumOpt)
       throws IOException {
     return create(src, permission, flag, true,
-        replication, blockSize, progress, buffersize);
+        replication, blockSize, progress, buffersize, checksumOpt);
   }
 
   /**
@@ -1088,6 +1200,7 @@ public class DFSClient implements java.io.Closeable {
    * @param blockSize maximum block size
    * @param progress interface for reporting client progress
    * @param buffersize underlying buffer size 
+   * @param checksumOpt checksum options
    * 
    * @return output stream
    * 
@@ -1101,8 +1214,8 @@ public class DFSClient implements java.io.Closeable {
                              short replication,
                              long blockSize,
                              Progressable progress,
-                             int buffersize)
-    throws IOException {
+                             int buffersize,
+                             ChecksumOpt checksumOpt) throws IOException {
     checkOpen();
     if (permission == null) {
       permission = FsPermission.getDefault();
@@ -1113,7 +1226,7 @@ public class DFSClient implements java.io.Closeable {
     }
     final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
         src, masked, flag, createParent, replication, blockSize, progress,
-        buffersize, dfsClientConf.createChecksum());
+        buffersize, dfsClientConf.createChecksum(checksumOpt));
     beginFileLease(src, result);
     return result;
   }
@@ -1151,15 +1264,13 @@ public class DFSClient implements java.io.Closeable {
                              long blockSize,
                              Progressable progress,
                              int buffersize,
-                             int bytesPerChecksum)
+                             ChecksumOpt checksumOpt)
       throws IOException, UnresolvedLinkException {
     checkOpen();
     CreateFlag.validate(flag);
     DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
     if (result == null) {
-      DataChecksum checksum = DataChecksum.newDataChecksum(
-          dfsClientConf.checksumType,
-          bytesPerChecksum);
+      DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
       result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
           flag, createParent, replication, blockSize, progress, buffersize,
           checksum);
@@ -1494,7 +1605,8 @@ public class DFSClient implements java.io.Closeable {
     }
     List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
     final DataOutputBuffer md5out = new DataOutputBuffer();
-    int bytesPerCRC = 0;
+    int bytesPerCRC = -1;
+    DataChecksum.Type crcType = DataChecksum.Type.DEFAULT;
     long crcPerBlock = 0;
     boolean refetchBlocks = false;
     int lastRetriedIndex = -1;
@@ -1598,6 +1710,17 @@ public class DFSClient implements java.io.Closeable {
               checksumData.getMd5().toByteArray());
           md5.write(md5out);
           
+          // read crc-type
+          final DataChecksum.Type ct = HdfsProtoUtil.
+              fromProto(checksumData.getCrcType());
+          if (i == 0) { // first block
+            crcType = ct;
+          } else if (crcType != DataChecksum.Type.MIXED
+              && crcType != ct) {
+            // if crc types are mixed in a file
+            crcType = DataChecksum.Type.MIXED;
+          }
+
           done = true;
 
           if (LOG.isDebugEnabled()) {
@@ -1623,7 +1746,18 @@ public class DFSClient implements java.io.Closeable {
 
     //compute file MD5
     final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData()); 
-    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
+    switch (crcType) {
+      case CRC32:
+        return new MD5MD5CRC32GzipFileChecksum(bytesPerCRC,
+            crcPerBlock, fileMD5);
+      case CRC32C:
+        return new MD5MD5CRC32CastagnoliFileChecksum(bytesPerCRC,
+            crcPerBlock, fileMD5);
+      default:
+        // we should never get here since the validity was checked
+        // when getCrcType() was called above.
+        return null;
+    }
   }
 
   /**
@@ -1786,14 +1920,6 @@ public class DFSClient implements java.io.Closeable {
     namenode.finalizeUpgrade();
   }
 
-  /**
-   * @see ClientProtocol#distributedUpgradeProgress(HdfsConstants.UpgradeAction)
-   */
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
-      throws IOException {
-    return namenode.distributedUpgradeProgress(action);
-  }
-
   /**
    */
   @Deprecated

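Editor's note: besides routing all checksum creation through ChecksumOpt, the DFSClient hunk above adds getBlockStorageLocations(), which DistributedFileSystem exposes as getFileBlockStorageLocations() further down. A hedged end-to-end sketch, not part of the patch: the path and settings are hypothetical, it assumes the BlockStorageLocation#getVolumeIds() accessor added elsewhere in this change, and dfs.datanode.hdfs-blocks-metadata.enabled must also be true on the datanodes or the UnsupportedOperationException branch above fires.

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.BlockStorageLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.VolumeId;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class StorageLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.datanode.hdfs-blocks-metadata.enabled", true);

        Path file = new Path("/tmp/example.dat");          // hypothetical file
        DistributedFileSystem dfs =
            (DistributedFileSystem) file.getFileSystem(conf);

        FileStatus stat = dfs.getFileStatus(file);
        BlockLocation[] blocks =
            dfs.getFileBlockLocations(stat, 0, stat.getLen());

        // One extra round of datanode RPCs, grouped per datanode internally.
        BlockStorageLocation[] storage =
            dfs.getFileBlockStorageLocations(Arrays.asList(blocks));
        for (BlockStorageLocation loc : storage) {
          for (VolumeId id : loc.getVolumeIds()) {
            System.out.println(loc.getOffset() + " -> "
                + (id.isValid() ? id.toString() : "unknown volume"));
          }
        }
      }
    }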
+ 8 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -54,6 +54,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT = 16;
   public static final String  DFS_CLIENT_USE_DN_HOSTNAME = "dfs.client.use.datanode.hostname";
   public static final boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT = false;
+  public static final String  DFS_HDFS_BLOCKS_METADATA_ENABLED = "dfs.datanode.hdfs-blocks-metadata.enabled";
+  public static final boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT = false;
+  public static final String  DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS = "dfs.client.file-block-storage-locations.num-threads";
+  public static final int     DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT = 10;
+  public static final String  DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT = "dfs.client.file-block-storage-locations.timeout";
+  public static final int     DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_DEFAULT = 60;
 
   // HA related configuration
   public static final String  DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX = "dfs.client.failover.proxy.provider";
@@ -172,7 +178,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // Replication monitoring related keys
   public static final String DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION =
       "dfs.namenode.invalidate.work.pct.per.iteration";
-  public static final int DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT = 32;
+  public static final float DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT = 0.32f;
   public static final String DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION =
       "dfs.namenode.replication.work.multiplier.per.iteration";
   public static final int DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT = 2;
@@ -245,7 +251,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_DU_RESERVED_KEY = "dfs.datanode.du.reserved";
   public static final long    DFS_DATANODE_DU_RESERVED_DEFAULT = 0;
   public static final String  DFS_DATANODE_HANDLER_COUNT_KEY = "dfs.datanode.handler.count";
-  public static final int     DFS_DATANODE_HANDLER_COUNT_DEFAULT = 3;
+  public static final int     DFS_DATANODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = "dfs.datanode.http.address";
   public static final int     DFS_DATANODE_HTTP_DEFAULT_PORT = 50075;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTP_DEFAULT_PORT;

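Editor's note: a short sketch of turning the new feature on from a client Configuration; the key names are taken verbatim from the constants above and the values are just the shipped defaults (the timeout is in seconds, matching the invokeAll() call in BlockStorageLocationUtil).

    import org.apache.hadoop.conf.Configuration;

    public class StorageLocationConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Must be enabled on the datanodes as well as on the client.
        conf.setBoolean("dfs.datanode.hdfs-blocks-metadata.enabled", true);
        // Parallelism and per-call timeout (seconds) for the datanode RPCs.
        conf.setInt("dfs.client.file-block-storage-locations.num-threads", 10);
        conf.setInt("dfs.client.file-block-storage-locations.timeout", 60);
        System.out.println(conf.get("dfs.datanode.hdfs-blocks-metadata.enabled"));
      }
    }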
+ 53 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -80,6 +80,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.protobuf.BlockingService;
@@ -282,13 +283,25 @@ public class DFSUtil {
     if (blocks == null) {
       return new BlockLocation[0];
     }
-    int nrBlocks = blocks.locatedBlockCount();
+    return locatedBlocks2Locations(blocks.getLocatedBlocks());
+  }
+  
+  /**
+   * Convert a List<LocatedBlock> to BlockLocation[]
+   * @param blocks A List<LocatedBlock> to be converted
+   * @return converted array of BlockLocation
+   */
+  public static BlockLocation[] locatedBlocks2Locations(List<LocatedBlock> blocks) {
+    if (blocks == null) {
+      return new BlockLocation[0];
+    }
+    int nrBlocks = blocks.size();
     BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
     if (nrBlocks == 0) {
       return blkLocations;
     }
     int idx = 0;
-    for (LocatedBlock blk : blocks.getLocatedBlocks()) {
+    for (LocatedBlock blk : blocks) {
       assert idx < nrBlocks : "Incorrect index";
       DatanodeInfo[] locations = blk.getLocations();
       String[] hosts = new String[locations.length];
@@ -1131,4 +1144,42 @@ public class DFSUtil {
     }
     return false;
   }
+  
+  /**
+   * Get DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION from configuration.
+   * 
+   * @param conf Configuration
+   * @return Value of DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION
+   */
+  public static float getInvalidateWorkPctPerIteration(Configuration conf) {
+    float blocksInvalidateWorkPct = conf.getFloat(
+        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
+        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT);
+    Preconditions.checkArgument(
+        (blocksInvalidateWorkPct > 0 && blocksInvalidateWorkPct <= 1.0f),
+        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION +
+        " = '" + blocksInvalidateWorkPct + "' is invalid. " +
+        "It should be a positive, non-zero float value, not greater than 1.0f, " +
+        "to indicate a percentage.");
+    return blocksInvalidateWorkPct;
+  }
+
+  /**
+   * Get DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION from
+   * configuration.
+   * 
+   * @param conf Configuration
+   * @return Value of DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION
+   */
+  public static int getReplWorkMultiplier(Configuration conf) {
+    int blocksReplWorkMultiplier = conf.getInt(
+            DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
+            DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
+    Preconditions.checkArgument(
+        (blocksReplWorkMultiplier > 0),
+        DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION +
+        " = '" + blocksReplWorkMultiplier + "' is invalid. " +
+        "It should be a positive, non-zero integer value.");
+    return blocksReplWorkMultiplier;
+  }
 }

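Editor's note: the two new DFSUtil getters validate their configuration values with Guava Preconditions so that a bad dfs.namenode.* setting fails fast at startup instead of silently misbehaving. A generic sketch of the same pattern (the key name and range are hypothetical):

    import com.google.common.base.Preconditions;
    import org.apache.hadoop.conf.Configuration;

    public class ValidatedConfSketch {
      static float getWorkPct(Configuration conf) {
        // Read the value, then reject anything outside (0, 1.0].
        float pct = conf.getFloat("example.work.pct.per.iteration", 0.32f);
        Preconditions.checkArgument(pct > 0 && pct <= 1.0f,
            "example.work.pct.per.iteration = '" + pct + "' is invalid. "
            + "It should be a positive float no greater than 1.0f.");
        return pct;
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setFloat("example.work.pct.per.iteration", 0.5f);
        System.out.println(getWorkPct(conf));
      }
    }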
+ 42 - 22
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -32,6 +32,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.BlockStorageLocation;
+import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -40,6 +42,7 @@ import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
@@ -52,12 +55,11 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
@@ -189,6 +191,36 @@ public class DistributedFileSystem extends FileSystem {
 
   }
 
+  /**
+   * Used to query storage location information for a list of blocks. This list
+   * of blocks is normally constructed via a series of calls to
+   * {@link DistributedFileSystem#getFileBlockLocations(Path, long, long)} to
+   * get the blocks for ranges of a file.
+   * 
+   * The returned array of {@link BlockStorageLocation} augments
+   * {@link BlockLocation} with a {@link VolumeId} per block replica. The
+   * VolumeId specifies the volume on the datanode on which the replica resides.
+   * The VolumeId has to be checked via {@link VolumeId#isValid()} before being
+   * used because volume information can be unavailable if the corresponding
+   * datanode is down or if the requested block is not found.
+   * 
+   * This API is unstable, and datanode-side support is disabled by default. It
+   * can be enabled by setting "dfs.datanode.hdfs-blocks-metadata.enabled" to
+   * true.
+   * 
+   * @param blocks
+   *          List of target BlockLocations to query volume location information
+   * @return volumeBlockLocations Augmented array of
+   *         {@link BlockStorageLocation}s containing additional volume location
+   *         information for each replica of each block.
+   */
+  @InterfaceStability.Unstable
+  public BlockStorageLocation[] getFileBlockStorageLocations(
+      List<BlockLocation> blocks) throws IOException, 
+      UnsupportedOperationException, InvalidBlockTokenException {
+    return dfs.getBlockStorageLocations(blocks);
+  }
+
   @Override
   public void setVerifyChecksum(boolean verifyChecksum) {
     this.verifyChecksum = verifyChecksum;
@@ -225,19 +257,19 @@ public class DistributedFileSystem extends FileSystem {
   public HdfsDataOutputStream create(Path f, FsPermission permission,
       boolean overwrite, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
-    return create(f, permission,
+    return this.create(f, permission,
         overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
            : EnumSet.of(CreateFlag.CREATE), bufferSize, replication,
-        blockSize, progress);
+        blockSize, progress, null);
   }
   
   @Override
   public HdfsDataOutputStream create(Path f, FsPermission permission,
     EnumSet<CreateFlag> cflags, int bufferSize, short replication, long blockSize,
-    Progressable progress) throws IOException {
+    Progressable progress, ChecksumOpt checksumOpt) throws IOException {
     statistics.incrementWriteOps(1);
     final DFSOutputStream out = dfs.create(getPathName(f), permission, cflags,
-        replication, blockSize, progress, bufferSize);
+        replication, blockSize, progress, bufferSize, checksumOpt);
     return new HdfsDataOutputStream(out, statistics);
   }
   
@@ -246,11 +278,11 @@ public class DistributedFileSystem extends FileSystem {
   protected HdfsDataOutputStream primitiveCreate(Path f,
     FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
     short replication, long blockSize, Progressable progress,
-    int bytesPerChecksum) throws IOException {
+    ChecksumOpt checksumOpt) throws IOException {
     statistics.incrementWriteOps(1);
     return new HdfsDataOutputStream(dfs.primitiveCreate(getPathName(f),
         absolutePermission, flag, true, replication, blockSize,
-        progress, bufferSize, bytesPerChecksum),statistics);
+        progress, bufferSize, checksumOpt),statistics);
    } 
 
   /**
@@ -265,7 +297,8 @@ public class DistributedFileSystem extends FileSystem {
       flag.add(CreateFlag.CREATE);
     }
     return new HdfsDataOutputStream(dfs.create(getPathName(f), permission, flag,
-        false, replication, blockSize, progress, bufferSize), statistics);
+        false, replication, blockSize, progress, 
+        bufferSize, null), statistics);
   }
 
   @Override
@@ -619,11 +652,6 @@ public class DistributedFileSystem extends FileSystem {
     dfs.finalizeUpgrade();
   }
 
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action
-  ) throws IOException {
-    return dfs.distributedUpgradeProgress(action);
-  }
-
   /*
    * Requests the namenode to dump data strcutures into specified 
    * file.
@@ -765,14 +793,6 @@ public class DistributedFileSystem extends FileSystem {
     return getDelegationToken(renewer.toString());
   }
   
-  @Override // FileSystem
-  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
-    List<Token<?>> tokenList = new ArrayList<Token<?>>();
-    Token<DelegationTokenIdentifier> token = this.getDelegationToken(renewer);
-    tokenList.add(token);
-    return tokenList;
-  }
-
   /**
    * Renew an existing delegation token.
    * 

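The getFileBlockStorageLocations() call added above is easiest to read from the caller's side. The sketch below is illustrative rather than part of this patch: it assumes an HDFS URI is configured as the default file system, that dfs.datanode.hdfs-blocks-metadata.enabled is set to true on the datanodes, and it uses a hypothetical path /user/example/data.txt and class name VolumeLocationExample.

// Illustrative caller, not part of this change set. Assumes the path lives on
// HDFS and that dfs.datanode.hdfs-blocks-metadata.enabled is true on the DNs;
// otherwise volume information may be missing or the call may fail.
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStorageLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class VolumeLocationExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path("/user/example/data.txt");   // hypothetical path
    FileSystem fs = file.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("expected an HDFS file system");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // Build the BlockLocation list for the whole file, then augment it with
    // per-replica volume information.
    FileStatus stat = dfs.getFileStatus(file);
    List<BlockLocation> blocks =
        Arrays.asList(dfs.getFileBlockLocations(file, 0, stat.getLen()));
    BlockStorageLocation[] locs = dfs.getFileBlockStorageLocations(blocks);

    for (BlockStorageLocation loc : locs) {
      for (VolumeId id : loc.getVolumeIds()) {
        // Volume info can be unavailable if the datanode was down or the
        // replica was not found, so check validity before using it.
        System.out.println(id.isValid() ? id.toString() : "<unknown volume>");
      }
    }
  }
}

Because volume information is best-effort, every VolumeId is checked with isValid() before use, exactly as the javadoc above requires.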
+ 18 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -106,4 +107,21 @@ public interface ClientDatanodeProtocol {
    */
   BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
       Token<BlockTokenIdentifier> token) throws IOException;
+  
+  /**
+   * Retrieves volume location information about a list of blocks on a datanode.
+   * This is in the form of an opaque {@link VolumeId} for each configured
+   * data directory, which is not guaranteed to be the same across DN restarts.
+   * 
+   * @param blocks
+   *          list of blocks on the local datanode
+   * @param tokens
+   *          block access tokens corresponding to the requested blocks
+   * @return an HdfsBlocksMetadata that associates {@link ExtendedBlock}s with
+   *         data directories
+   * @throws IOException
+   *           if datanode is unreachable, or replica is not found on datanode
+   */
+  HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
+      List<Token<BlockTokenIdentifier>> tokens) throws IOException; 
 }

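getHdfsBlocksMetadata() is a per-datanode call: the caller groups the blocks of interest by the datanode holding a replica and supplies one block-access token per block, index-aligned with the blocks list. The sketch below is a hedged illustration of that calling convention only; the ClientDatanodeProtocol proxy is taken as a parameter because proxy creation is outside the scope of this interface change, and the class name BlocksMetadataCall is hypothetical.

// Hedged illustration of the per-datanode calling convention; not patch code.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public class BlocksMetadataCall {
  /**
   * Queries one datanode for the volumes of the given replicas. The caller
   * must already have grouped the LocatedBlocks by this datanode and must
   * supply a proxy for it.
   */
  static HdfsBlocksMetadata query(ClientDatanodeProtocol datanode,
      List<LocatedBlock> blocksOnThisDatanode) throws IOException {
    List<ExtendedBlock> blocks = new ArrayList<ExtendedBlock>();
    List<Token<BlockTokenIdentifier>> tokens =
        new ArrayList<Token<BlockTokenIdentifier>>();
    for (LocatedBlock lb : blocksOnThisDatanode) {
      blocks.add(lb.getBlock());        // blocks and tokens stay index-aligned
      tokens.add(lb.getBlockToken());
    }
    return datanode.getHdfsBlocksMetadata(blocks, tokens);
  }
}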
+ 0 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -33,8 +33,6 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -694,16 +692,6 @@ public interface ClientProtocol {
    */
   public void finalizeUpgrade() throws IOException;
 
-  /**
-   * Report distributed upgrade progress or force current upgrade to proceed.
-   * 
-   * @param action {@link HdfsConstants.UpgradeAction} to perform
-   * @return upgrade status information or null if no upgrades are in progress
-   * @throws IOException
-   */
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action) 
-      throws IOException;
-
   /**
    * @return CorruptFileBlocks, containing a list of corrupt files (with
    *         duplicates if there is more than one corrupt block in a file)

+ 94 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsBlocksMetadata.java

@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Augments an array of blocks on a datanode with additional information about
+ * where the block is stored.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class HdfsBlocksMetadata {
+  
+  /**
+   * List of blocks
+   */
+  private final ExtendedBlock[] blocks;
+  
+  /**
+   * List of volumes
+   */
+  private final List<byte[]> volumeIds;
+  
+  /**
+   * List of indexes into <code>volumeIds</code>, one per block in
+   * <code>blocks</code>. A value of Integer.MAX_VALUE indicates that the
+   * block was not found.
+   */
+  private final List<Integer> volumeIndexes;
+
+  /**
+   * Constructs HdfsBlocksMetadata.
+   * 
+   * @param blocks
+   *          List of blocks described
+   * @param volumeIds
+   *          List of potential volume identifiers, specifying volumes where 
+   *          blocks may be stored
+   * @param volumeIndexes
+   *          Indexes into the list of volume identifiers, one per block
+   */
+  public HdfsBlocksMetadata(ExtendedBlock[] blocks, List<byte[]> volumeIds, 
+      List<Integer> volumeIndexes) {
+    this.blocks = blocks;
+    this.volumeIds = volumeIds;
+    this.volumeIndexes = volumeIndexes;
+  }
+
+  /**
+   * Get the array of blocks.
+   * 
+   * @return array of blocks
+   */
+  public ExtendedBlock[] getBlocks() {
+    return blocks;
+  }
+  
+  /**
+   * Get the list of volume identifiers in raw byte form.
+   * 
+   * @return list of ids
+   */
+  public List<byte[]> getVolumeIds() {
+    return volumeIds;
+  }
+
+  /**
+   * Get a list of indexes into the array of {@link VolumeId}s, one per block.
+   * 
+   * @return list of indexes
+   */
+  public List<Integer> getVolumeIndexes() {
+    return volumeIndexes;
+  }
+}

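To make the index scheme concrete: volumeIndexes holds one entry per element of blocks, each entry pointing into volumeIds, with Integer.MAX_VALUE reserved for replicas the datanode could not find. The decoder below is a minimal sketch of reading the structure under those documented semantics and is not code from this change set; the class name BlocksMetadataDecoder is hypothetical.

// Minimal sketch of consuming HdfsBlocksMetadata; not part of this patch.
import java.util.List;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;

public class BlocksMetadataDecoder {
  /** Prints the raw volume id (if any) reported for each block. */
  static void decode(HdfsBlocksMetadata metadata) {
    ExtendedBlock[] blocks = metadata.getBlocks();
    List<byte[]> volumeIds = metadata.getVolumeIds();
    List<Integer> indexes = metadata.getVolumeIndexes();
    for (int i = 0; i < blocks.length; i++) {
      int idx = indexes.get(i);
      if (idx == Integer.MAX_VALUE) {
        // Sentinel value: the datanode did not find this replica.
        System.out.println(blocks[i] + " -> not found");
      } else {
        byte[] rawVolumeId = volumeIds.get(idx);
        System.out.println(blocks[i] + " -> volume id of "
            + rawVolumeId.length + " raw bytes");
      }
    }
  }
}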
+ 1 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java

@@ -60,7 +60,7 @@ public class HdfsConstants {
   public static int MAX_PATH_LENGTH = 8000;
   public static int MAX_PATH_DEPTH = 1000;
 
-  // TODO mb@media-style.com: should be conf injected?
+  // TODO should be conf injected?
   public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
   public static final int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt(
       DFSConfigKeys.IO_FILE_BUFFER_SIZE_KEY,
@@ -84,16 +84,6 @@ public class HdfsConstants {
   // An invalid transaction ID that will never be seen in a real namesystem.
   public static final long INVALID_TXID = -12345;
 
-  /**
-   * Distributed upgrade actions:
-   * 
-   * 1. Get upgrade status. 2. Get detailed upgrade status. 3. Proceed with the
-   * upgrade if it is stuck, no matter what the status is.
-   */
-  public static enum UpgradeAction {
-    GET_STATUS, DETAILED_STATUS, FORCE_PROCEED;
-  }
-
   /**
    * URI Scheme for hdfs://namenode/ URIs.
    */

Some files were not shown because too many files changed in this diff.