
Merge remote-tracking branch 'apache/trunk' into HDFS-7285

Zhe Zhang, 9 years ago
commit 53358fe680
100 changed files with 3201 additions and 915 deletions
  1. 9 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. 1 18
      hadoop-common-project/hadoop-common/pom.xml
  3. 7 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  4. 12 2
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  5. 11 2
      hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
  6. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
  7. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
  8. 0 0
      hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar
  9. 0 0
      hadoop-common-project/hadoop-common/src/test/resources/test-untar.tgz
  10. 5 0
      hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
  11. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
  12. 6 4
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
  13. 9 6
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
  14. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
  15. 4 3
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
  16. 68 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
  17. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExternalBlockReader.java
  18. 8 7
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java
  19. 8 7
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java
  20. 5 1
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
  21. 9 6
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
  22. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/BlockReportOptions.java
  23. 13 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
  24. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
  25. 2 2
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
  26. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/InvalidEncryptionKeyException.java
  27. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
  28. 3 3
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
  29. 2 2
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolPB.java
  30. 6 6
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
  31. 13 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
  32. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java
  33. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
  34. 29 1
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
  35. 13 2
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  36. 66 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenProvider.java
  37. 103 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenTimer.java
  38. 62 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfCredentialBasedAccessTokenProvider.java
  39. 146 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
  40. 135 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
  41. 79 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2ConnectionConfigurator.java
  42. 46 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2Constants.java
  43. 63 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/Utils.java
  44. 26 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/package-info.java
  45. 22 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  46. 6 0
      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  47. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
  48. 0 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  49. 10 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  50. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
  51. 2 66
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  52. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
  53. 7 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
  54. 2 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
  55. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  56. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
  57. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java
  58. 7 26
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  59. 4 15
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
  60. 0 15
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
  61. 0 19
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
  62. 197 355
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  63. 41 106
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
  64. 111 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockRecoveryWork.java
  65. 82 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockToMarkCorrupt.java
  66. 0 16
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
  67. 25 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
  68. 6 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
  69. 60 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
  70. 19 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
  71. 53 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
  72. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
  73. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
  74. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
  75. 40 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
  76. 63 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  77. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
  78. 254 100
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
  79. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
  80. 25 0
      hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
  81. 15 15
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
  82. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
  83. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java
  84. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
  85. 6 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
  86. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
  87. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
  88. 0 30
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
  89. 102 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
  90. 3 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java
  91. 25 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
  92. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
  93. 80 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
  94. 373 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
  95. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
  96. 216 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
  97. 63 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
  98. 138 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
  99. 138 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
  100. 1 0
      hadoop-project/src/site/site.xml

+ 9 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -756,6 +756,12 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12325. RPC Metrics : Add the ability track and log slow RPCs.
     (Anu Engineer via xyao)
 
+    HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.
+    (wang)
+
+    HADOOP-12367. Move TestFileUtil's test resources to resources folder.
+    (wang via yliu)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp
@@ -1114,6 +1120,9 @@ Release 2.7.2 - UNRELEASED
     HADOOP-12061. Incorrect command in single cluster setup document.
     (Kengo Seki via aajisaka)
 
+    HADOOP-12359. hadoop fs -getmerge doc is wrong.
+    (Jagadesh Kiran N via aajisaka)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

+ 1 - 18
hadoop-common-project/hadoop-common/pom.xml

@@ -453,23 +453,6 @@
               </target>
             </configuration>
           </execution>
-          <execution>
-            <id>copy-test-tarballs</id>
-            <phase>process-test-resources</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-                <copy toDir="${test.cache.data}">
-                  <fileset dir="${basedir}/src/test/java/org/apache/hadoop/fs">
-                    <include name="test-untar.tar"/>
-                    <include name="test-untar.tgz"/>
-                  </fileset>
-                </copy>
-              </target>
-            </configuration>
-          </execution>
           <execution>
             <phase>pre-site</phase>
             <goals>
@@ -505,7 +488,7 @@
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h</exclude>
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c</exclude>
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h</exclude>
-            <exclude>src/test/java/org/apache/hadoop/fs/test-untar.tgz</exclude>
+            <exclude>src/test/resources/test-untar.tgz</exclude>
             <exclude>src/test/resources/test.har/_SUCCESS</exclude>
             <exclude>src/test/resources/test.har/_index</exclude>
             <exclude>src/test/resources/test.har/_masterindex</exclude>

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -85,6 +85,13 @@ public class CommonConfigurationKeysPublic {
   /** Default value for FS_TRASH_CHECKPOINT_INTERVAL_KEY */
   public static final long    FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT = 0;
 
+  /**
+   * Directories that cannot be removed unless empty, even by an
+   * administrator.
+   */
+  public static final String FS_PROTECTED_DIRECTORIES =
+      "fs.protected.directories";
+
   // TBD: Code is still using hardcoded values (e.g. "fs.automatic.close")
   // instead of constant (e.g. FS_AUTOMATIC_CLOSE_KEY)
   //

+ 12 - 2
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -564,6 +564,16 @@ for ldap providers in the same way as above does.
   </description>
 </property>
 
+<property>
+  <name>fs.protected.directories</name>
+  <value></value>
+  <description>A comma-separated list of directories which cannot
+    be deleted even by the superuser unless they are empty. This
+    setting can be used to guard important system directories
+    against accidental deletion due to administrator error.
+  </description>
+</property>
+
 <property>
   <name>fs.AbstractFileSystem.file.impl</name>
   <value>org.apache.hadoop.fs.local.LocalFs</value>
@@ -783,7 +793,7 @@ for ldap providers in the same way as above does.
 
 <property>
   <name>fs.s3a.attempts.maximum</name>
-  <value>10</value>
+  <value>20</value>
   <description>How many times we should retry commands on transient errors.</description>
 </property>
 
@@ -795,7 +805,7 @@ for ldap providers in the same way as above does.
 
 <property>
   <name>fs.s3a.connection.timeout</name>
-  <value>50000</value>
+  <value>200000</value>
   <description>Socket connection timeout in milliseconds.</description>
 </property>
 

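As an aside, a minimal sketch of how the fs.protected.directories key introduced above could be referenced from code; the directory paths and class name below are hypothetical examples and not part of this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

    public class ProtectedDirsExample {
      public static void main(String[] args) {
        // Comma-separated list of directories that, per the description above,
        // should not be deletable unless empty, even by the superuser.
        Configuration conf = new Configuration();
        conf.set(CommonConfigurationKeysPublic.FS_PROTECTED_DIRECTORIES,
            "/user,/apps");  // example paths, not taken from the commit
        System.out.println(conf.get("fs.protected.directories"));
      }
    }

In a real deployment this value would normally be set in core-site.xml rather than programmatically; the commit's FSDirDeleteOp and TestProtectedDirectories changes cover the server-side enforcement.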
+ 11 - 2
hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md

@@ -368,9 +368,18 @@ Returns 0 on success and non-zero on error.
 getmerge
 --------
 
-Usage: `hadoop fs -getmerge <src> <localdst> [addnl]`
+Usage: `hadoop fs -getmerge [-nl] <src> <localdst>`
 
-Takes a source directory and a destination file as input and concatenates files in src into the destination local file. Optionally addnl can be set to enable adding a newline character at the end of each file.
+Takes a source directory and a destination file as input and concatenates files in src into the destination local file. Optionally -nl can be set to enable adding a newline character (LF) at the end of each file.
+
+Examples:
+
+* `hadoop fs -getmerge -nl  /src  /opt/output.txt`
+* `hadoop fs -getmerge -nl  /src/file1.txt /src/file2.txt  /output.txt`
+
+Exit Code:
+
+Returns 0 on success and non-zero on error.
 
 help
 ----

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java

@@ -77,7 +77,7 @@ import org.junit.Test;
  * </p>
  */
 
-public class ViewFileSystemBaseTest {
+abstract public class ViewFileSystemBaseTest {
   FileSystem fsView;  // the view file system - the mounts are here
   FileSystem fsTarget;  // the target file system - the mount will point here
   Path targetTestRoot;

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java

@@ -76,7 +76,7 @@ import org.mockito.Mockito;
  *     @AfterClass    public static void ClusterShutdownAtEnd()
  * </p>
  */
-public class ViewFsBaseTest {
+abstract public class ViewFsBaseTest {
   FileContext fcView; // the view file system - the mounts are here
   FileContext fcTarget; // the target file system - the mount will point here
   Path targetTestRoot;

+ 0 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar → hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar


+ 0 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz → hadoop-common-project/hadoop-common/src/test/resources/test-untar.tgz


+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs-client/pom.xml

@@ -31,6 +31,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <packaging>jar</packaging>
 
   <dependencies>
+    <dependency>
+      <groupId>com.squareup.okhttp</groupId>
+      <artifactId>okhttp</artifactId>
+      <version>2.4.0</version>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReader.java


+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java

@@ -22,11 +22,10 @@ import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.EnumSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
@@ -42,6 +41,9 @@ import org.apache.htrace.TraceScope;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * BlockReaderLocal enables local short circuited reads. If the DFS client is on
  * the same machine as the datanode, then the client can read files directly
@@ -60,7 +62,7 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 class BlockReaderLocal implements BlockReader {
-  static final Log LOG = LogFactory.getLog(BlockReaderLocal.class);
+  static final Logger LOG = LoggerFactory.getLogger(BlockReaderLocal.class);
 
   private static final DirectBufferPool bufferPool = new DirectBufferPool();
 
@@ -88,7 +90,7 @@ class BlockReaderLocal implements BlockReader {
     public Builder setCachingStrategy(CachingStrategy cachingStrategy) {
       long readahead = cachingStrategy.getReadahead() != null ?
           cachingStrategy.getReadahead() :
-              DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT;
+              HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT;
       this.maxReadahead = (int)Math.min(Integer.MAX_VALUE, readahead);
       return this;
     }

+ 9 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java

@@ -29,8 +29,6 @@ import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ReadOption;
@@ -45,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
+import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -55,6 +54,9 @@ import org.apache.htrace.Sampler;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * BlockReaderLocalLegacy enables local short circuited reads. If the DFS client is on
  * the same machine as the datanode, then the client can read files directly
@@ -79,7 +81,8 @@ import org.apache.htrace.TraceScope;
  */
 @InterfaceAudience.Private
 class BlockReaderLocalLegacy implements BlockReader {
-  private static final Log LOG = LogFactory.getLog(BlockReaderLocalLegacy.class);
+  private static final Logger LOG = LoggerFactory.getLogger(
+      BlockReaderLocalLegacy.class);
 
   //Stores the cache and proxy for a local datanode.
   private static class LocalDatanodeInfo {
@@ -112,7 +115,7 @@ class BlockReaderLocalLegacy implements BlockReader {
           proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
             @Override
             public ClientDatanodeProtocol run() throws Exception {
-              return DFSUtil.createClientDatanodeProtocolProxy(node, conf,
+              return DFSUtilClient.createClientDatanodeProtocolProxy(node, conf,
                   socketTimeout, connectToDnViaHostname);
             }
           });
@@ -244,7 +247,7 @@ class BlockReaderLocalLegacy implements BlockReader {
     } catch (IOException e) {
       // remove from cache
       localDatanodeInfo.removeBlockLocalPathInfo(blk);
-      DFSClient.LOG.warn("BlockReaderLocalLegacy: Removing " + blk
+      LOG.warn("BlockReaderLocalLegacy: Removing " + blk
           + " from cache because local file " + pathinfo.getBlockPath()
           + " could not be opened.");
       throw e;
@@ -689,7 +692,7 @@ class BlockReaderLocalLegacy implements BlockReader {
 
   @Override
   public synchronized void close() throws IOException {
-    IOUtils.cleanup(LOG, dataIn, checksumIn);
+    IOUtilsClient.cleanup(LOG, dataIn, checksumIn);
     if (slowReadBuff != null) {
       bufferPool.returnBuffer(slowReadBuff);
       slowReadBuff = null;

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java


+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java

@@ -19,8 +19,6 @@ package org.apache.hadoop.hdfs;
 
 import java.util.HashMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -32,6 +30,9 @@ import org.apache.hadoop.hdfs.util.ByteArrayManager;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * ClientContext contains context information for a client.
  * 
@@ -40,7 +41,7 @@ import com.google.common.annotations.VisibleForTesting;
  */
 @InterfaceAudience.Private
 public class ClientContext {
-  private static final Log LOG = LogFactory.getLog(ClientContext.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ClientContext.class);
 
   /**
   * Global map of context names to caches contexts.

+ 68 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java

@@ -22,22 +22,32 @@ import com.google.common.collect.Maps;
 import com.google.common.primitives.SignedBytes;
 import org.apache.commons.io.Charsets;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.net.SocketFactory;
+import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.text.SimpleDateFormat;
 import java.util.Collection;
 import java.util.Collections;
@@ -455,4 +465,62 @@ public class DFSUtilClient {
     localAddrMap.put(addr.getHostAddress(), local);
     return local;
   }
+
+  /** Create a {@link ClientDatanodeProtocol} proxy */
+  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
+      DatanodeID datanodeid, Configuration conf, int socketTimeout,
+      boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
+    return new ClientDatanodeProtocolTranslatorPB(datanodeid, conf, socketTimeout,
+        connectToDnViaHostname, locatedBlock);
+  }
+
+  /** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */
+  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
+      DatanodeID datanodeid, Configuration conf, int socketTimeout,
+      boolean connectToDnViaHostname) throws IOException {
+    return new ClientDatanodeProtocolTranslatorPB(
+        datanodeid, conf, socketTimeout, connectToDnViaHostname);
+  }
+
+  /** Create a {@link ClientDatanodeProtocol} proxy */
+  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
+      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+      SocketFactory factory) throws IOException {
+    return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory);
+  }
+
+  /**
+   * Creates a new KeyProvider from the given Configuration.
+   *
+   * @param conf Configuration
+   * @return new KeyProvider, or null if no provider was found.
+   * @throws IOException if the KeyProvider is improperly specified in
+   *                             the Configuration
+   */
+  public static KeyProvider createKeyProvider(
+      final Configuration conf) throws IOException {
+    final String providerUriStr =
+        conf.getTrimmed(HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "");
+    // No provider set in conf
+    if (providerUriStr.isEmpty()) {
+      return null;
+    }
+    final URI providerUri;
+    try {
+      providerUri = new URI(providerUriStr);
+    } catch (URISyntaxException e) {
+      throw new IOException(e);
+    }
+    KeyProvider keyProvider = KeyProviderFactory.get(providerUri, conf);
+    if (keyProvider == null) {
+      throw new IOException("Could not instantiate KeyProvider from " +
+          HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI + " setting of '"
+          + providerUriStr + "'");
+    }
+    if (keyProvider.isTransient()) {
+      throw new IOException("KeyProvider " + keyProvider.toString()
+          + " was found but it is a transient provider.");
+    }
+    return keyProvider;
+  }
 }

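For orientation, a small hedged sketch of how the createKeyProvider helper moved into DFSUtilClient above might be exercised from client code; the KMS address is a made-up example, not something defined by this change:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.hdfs.DFSUtilClient;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class KeyProviderExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The key name comes from this commit; the value is an illustrative
        // (hypothetical) KMS endpoint.
        conf.set(HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
            "kms://http@kms.example.com:9600/kms");
        // Returns null when no provider URI is configured; throws IOException
        // for an unparseable URI or a transient provider, per the code above.
        KeyProvider provider = DFSUtilClient.createKeyProvider(conf);
        System.out.println(provider);
      }
    }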
+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ExternalBlockReader.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExternalBlockReader.java


+ 8 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java

@@ -21,14 +21,12 @@ import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.cache.Cache;
@@ -36,10 +34,13 @@ import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 @InterfaceAudience.Private
 public class KeyProviderCache {
 
-  public static final Log LOG = LogFactory.getLog(KeyProviderCache.class);
+  public static final Logger LOG = LoggerFactory.getLogger(KeyProviderCache.class);
 
   private final Cache<URI, KeyProvider> cache;
 
@@ -72,7 +73,7 @@ public class KeyProviderCache {
       return cache.get(kpURI, new Callable<KeyProvider>() {
         @Override
         public KeyProvider call() throws Exception {
-          return DFSUtil.createKeyProvider(conf);
+          return DFSUtilClient.createKeyProvider(conf);
         }
       });
     } catch (Exception e) {
@@ -83,11 +84,11 @@ public class KeyProviderCache {
 
   private URI createKeyProviderURI(Configuration conf) {
     final String providerUriStr =
-        conf.getTrimmed(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "");
+        conf.getTrimmed(HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "");
     // No provider set in conf
     if (providerUriStr.isEmpty()) {
       LOG.error("Could not find uri with key ["
-          + DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI
+          + HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI
          + "] to create a keyProvider !!");
       return null;
     }

+ 8 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/PeerCache.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java

@@ -27,15 +27,16 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.LinkedListMultimap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A cache of input stream sockets to Data Node.
@@ -44,7 +45,7 @@ import org.apache.hadoop.util.Time;
 @InterfaceAudience.Private
 @VisibleForTesting
 public class PeerCache {
-  private static final Log LOG = LogFactory.getLog(PeerCache.class);
+  private static final Logger LOG = LoggerFactory.getLogger(PeerCache.class);
   
   private static class Key {
     final DatanodeID dnID;
@@ -188,7 +189,7 @@ public class PeerCache {
     if (peer.isClosed()) return;
     if (capacity <= 0) {
       // Cache disabled.
-      IOUtils.cleanup(LOG, peer);
+      IOUtilsClient.cleanup(LOG, peer);
       return;
     }
     putInternal(dnId, peer);
@@ -222,7 +223,7 @@ public class PeerCache {
         expiryPeriod) {
         break;
       }
-      IOUtils.cleanup(LOG, entry.getValue().getPeer());
+      IOUtilsClient.cleanup(LOG, entry.getValue().getPeer());
       iter.remove();
     }
   }
@@ -241,7 +242,7 @@ public class PeerCache {
         "capacity: " + capacity);
     }
     Entry<Key, Value> entry = iter.next();
-    IOUtils.cleanup(LOG, entry.getValue().getPeer());
+    IOUtilsClient.cleanup(LOG, entry.getValue().getPeer());
     iter.remove();
   }
 
@@ -269,7 +270,7 @@ public class PeerCache {
   @VisibleForTesting
   synchronized void clear() {
     for (Value value : multimap.values()) {
-      IOUtils.cleanup(LOG, value.getPeer());
+      IOUtilsClient.cleanup(LOG, value.getPeer());
     }
     multimap.clear();
   }

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java

@@ -50,6 +50,8 @@ import org.apache.hadoop.util.DataChecksum;
 import org.apache.htrace.Sampler;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 /**
@@ -60,6 +62,8 @@ import org.apache.htrace.TraceScope;
 @InterfaceAudience.Private
 @Deprecated
 public class RemoteBlockReader extends FSInputChecker implements BlockReader {
+  static final Logger LOG = LoggerFactory.getLogger(FSInputChecker.class);
+
   private final Peer peer;
   private final DatanodeID datanodeID;
   private final DataInputStream in;
@@ -488,7 +492,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
   public int available() throws IOException {
     // An optimistic estimate of how much data is available
     // to us without doing network I/O.
-    return DFSClient.TCP_WINDOW_SIZE;
+    return RemoteBlockReader2.TCP_WINDOW_SIZE;
   }
 
   @Override

+ 9 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java

@@ -28,8 +28,6 @@ import java.nio.channels.ReadableByteChannel;
 import java.util.EnumSet;
 import java.util.UUID;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.hdfs.net.Peer;
@@ -56,6 +54,9 @@ import org.apache.htrace.TraceScope;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * This is a wrapper around connection to datanode
  * and understands checksum, offset etc.
@@ -85,16 +86,18 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceAudience.Private
 public class RemoteBlockReader2  implements BlockReader {
 
-  static final Log LOG = LogFactory.getLog(RemoteBlockReader2.class);
-  
+  static final Logger LOG = LoggerFactory.getLogger(RemoteBlockReader2.class);
+  static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB;
+
   final private Peer peer;
   final private DatanodeID datanodeID;
   final private PeerCache peerCache;
   final private long blockId;
   private final ReadableByteChannel in;
+
   private DataChecksum checksum;
-  
   private final PacketReceiver packetReceiver = new PacketReceiver(true);
+
   private ByteBuffer curDataSlice = null;
 
   /** offset in block of the last chunk received */
@@ -457,7 +460,7 @@ public class RemoteBlockReader2  implements BlockReader {
   public int available() throws IOException {
     // An optimistic estimate of how much data is available
     // to us without doing network I/O.
-    return DFSClient.TCP_WINDOW_SIZE;
+    return TCP_WINDOW_SIZE;
   }
   
   @Override

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/BlockReportOptions.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/BlockReportOptions.java


+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java

@@ -36,6 +36,14 @@ public interface HdfsClientConfigKeys {
   String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
       "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
 
+  String DFS_WEBHDFS_OAUTH_ENABLED_KEY = "dfs.webhdfs.oauth2.enabled";
+  boolean DFS_WEBHDFS_OAUTH_ENABLED_DEFAULT = false;
+
+  String OAUTH_CLIENT_ID_KEY = "dfs.webhdfs.oauth2.client.id";
+  String OAUTH_REFRESH_URL_KEY = "dfs.webhdfs.oauth2.refresh.url";
+
+  String ACCESS_TOKEN_PROVIDER_KEY = "dfs.webhdfs.oauth2.access.token.provider";
+
   String PREFIX = "dfs.client.";
   String  DFS_NAMESERVICES = "dfs.nameservices";
   int     DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
@@ -113,6 +121,11 @@ public interface HdfsClientConfigKeys {
       "dfs.datanode.hdfs-blocks-metadata.enabled";
   boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT = false;
 
+  String  DFS_DATANODE_KERBEROS_PRINCIPAL_KEY = "dfs.datanode.kerberos.principal";
+  String  DFS_DATANODE_READAHEAD_BYTES_KEY = "dfs.datanode.readahead.bytes";
+  long    DFS_DATANODE_READAHEAD_BYTES_DEFAULT = 4 * 1024 * 1024; // 4MB
+  String  DFS_ENCRYPTION_KEY_PROVIDER_URI = "dfs.encryption.key.provider.uri";
+
   String REPLICA_ACCESSOR_BUILDER_CLASSES_KEY =
       PREFIX + "replica.accessor.builder.classes";
 

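As a quick orientation on the OAuth2 keys added above, here is a hedged sketch of how a WebHDFS client configuration might set them; the client id and refresh URL values are illustrative only, while the provider class name matches the ConfRefreshTokenBasedAccessTokenProvider added elsewhere in this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class OAuth2ConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Turn on OAuth2 for WebHDFS connections (the default added above is false).
        conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_KEY, true);
        // Illustrative values; real deployments point these at their identity provider.
        conf.set(HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY, "example-client-id");
        conf.set(HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY,
            "https://idp.example.com/oauth2/token");
        conf.set(HdfsClientConfigKeys.ACCESS_TOKEN_PROVIDER_KEY,
            "org.apache.hadoop.hdfs.web.oauth2.ConfRefreshTokenBasedAccessTokenProvider");
        System.out.println(conf.get(HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_KEY));
      }
    }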
+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java


+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java

@@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.ReconfigurationTaskStatus;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
 import org.apache.hadoop.security.KerberosInfo;
@@ -36,7 +36,7 @@ import org.apache.hadoop.security.token.TokenInfo;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 @KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = HdfsClientConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 @TokenInfo(BlockTokenSelector.class)
 public interface ClientDatanodeProtocol {
   /**

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/InvalidEncryptionKeyException.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/InvalidEncryptionKeyException.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java


+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java

@@ -24,14 +24,14 @@ import java.io.InputStream;
 import java.nio.ByteBuffer;
 import java.nio.channels.ReadableByteChannel;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Ints;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Class to handle reading packets one-at-a-time from the wire.
@@ -47,7 +47,7 @@ public class PacketReceiver implements Closeable {
    */
   private static final int MAX_PACKET_SIZE = 16 * 1024 * 1024;
 
-  static final Log LOG = LogFactory.getLog(PacketReceiver.class);
+  static final Logger LOG = LoggerFactory.getLogger(PacketReceiver.class);
   
   private static final DirectBufferPool bufferPool = new DirectBufferPool();
   private final boolean useDirectBuffers;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolPB.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolPB.java

@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.protocolPB;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
 import org.apache.hadoop.ipc.ProtocolInfo;
@@ -26,7 +26,7 @@ import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.token.TokenInfo;
 
 @KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = HdfsClientConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 @TokenInfo(BlockTokenSelector.class)
 @ProtocolInfo(protocolName = 
     "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol",

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java

@@ -27,8 +27,6 @@ import javax.net.SocketFactory;
 
 import com.google.common.base.Optional;
 import com.google.common.collect.Maps;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -70,6 +68,8 @@ import org.apache.hadoop.security.token.Token;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is the client side translator to translate the requests made on
@@ -81,8 +81,8 @@ import com.google.protobuf.ServiceException;
 public class ClientDatanodeProtocolTranslatorPB implements
     ProtocolMetaInterface, ClientDatanodeProtocol,
     ProtocolTranslator, Closeable {
-  public static final Log LOG = LogFactory
-      .getLog(ClientDatanodeProtocolTranslatorPB.class);
+  public static final Logger LOG = LoggerFactory
+      .getLogger(ClientDatanodeProtocolTranslatorPB.class);
   
   /** RpcController is not used and hence is set to null */
   private final static RpcController NULL_CONTROLLER = null;
@@ -219,7 +219,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
-    return new BlockLocalPathInfo(PBHelper.convert(resp.getBlock()),
+    return new BlockLocalPathInfo(PBHelperClient.convert(resp.getBlock()),
         resp.getLocalPath(), resp.getLocalMetaPath());
   }
 
@@ -251,7 +251,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
     GetDatanodeInfoResponseProto response;
     try {
       response = rpcProxy.getDatanodeInfo(NULL_CONTROLLER, VOID_GET_DATANODE_INFO);
-      return PBHelper.convert(response.getLocalInfo());
+      return PBHelperClient.convert(response.getLocalInfo());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java

@@ -23,12 +23,14 @@ import com.google.protobuf.CodedInputStream;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
@@ -185,6 +187,17 @@ public class PBHelperClient {
     return pinnings;
   }
 
+  public static ExtendedBlock convert(ExtendedBlockProto eb) {
+    if (eb == null) return null;
+    return new ExtendedBlock( eb.getPoolId(), eb.getBlockId(), eb.getNumBytes(),
+        eb.getGenerationStamp());
+  }
+
+  public static DatanodeLocalInfo convert(DatanodeLocalInfoProto proto) {
+    return new DatanodeLocalInfo(proto.getSoftwareVersion(),
+        proto.getConfigVersion(), proto.getUptime());
+  }
+
   static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
     if (di == null) return null;
     return convert(di);

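The two converters added above give the client module its own protobuf-to-domain mapping. A hedged sketch of a round trip through the new ExtendedBlock overload; the builder setters come from the generated ExtendedBlockProto API and are assumed here, not shown in this hunk:

    // Sketch only: assumes the standard generated setters on ExtendedBlockProto.
    ExtendedBlockProto proto = ExtendedBlockProto.newBuilder()
        .setPoolId("BP-1")
        .setBlockId(1073741825L)
        .setGenerationStamp(1001L)
        .setNumBytes(134217728L)
        .build();
    ExtendedBlock block = PBHelperClient.convert(proto);
    // block now carries the same pool id, block id, length and generation stamp.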
+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java


+ 29 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java

@@ -31,6 +31,7 @@ import javax.net.ssl.SSLSocketFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.web.oauth2.OAuth2ConnectionConfigurator;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
@@ -77,15 +78,42 @@ public class URLConnectionFactory {
   * try to load SSL certificates when it is specified.
   */
  public static URLConnectionFactory newDefaultURLConnectionFactory(Configuration conf) {
+    ConnectionConfigurator conn = getSSLConnectionConfiguration(conf);
+
+    return new URLConnectionFactory(conn);
+  }
+
+  private static ConnectionConfigurator
+      getSSLConnectionConfiguration(Configuration conf) {
     ConnectionConfigurator conn = null;
     try {
       conn = newSslConnConfigurator(DEFAULT_SOCKET_TIMEOUT, conf);
     } catch (Exception e) {
       LOG.debug(
-          "Cannot load customized ssl related configuration. Fallback to system-generic settings.",
+          "Cannot load customized ssl related configuration. Fallback to" +
+              " system-generic settings.",
           e);
       conn = DEFAULT_TIMEOUT_CONN_CONFIGURATOR;
     }
+
+    return conn;
+  }
+
+  /**
+   * Construct a new URLConnectionFactory that supports OAuth2-based connections.
+   * It will also try to load the SSL configuration when it is specified.
+   */
+  public static URLConnectionFactory
+      newOAuth2URLConnectionFactory(Configuration conf) throws IOException {
+    ConnectionConfigurator conn = null;
+    try {
+      ConnectionConfigurator sslConnConfigurator
+          = newSslConnConfigurator(DEFAULT_SOCKET_TIMEOUT, conf);
+
+      conn = new OAuth2ConnectionConfigurator(conf, sslConnConfigurator);
+    } catch (Exception e) {
+      throw new IOException("Unable to load OAuth2 connection factory.", e);
+    }
     return new URLConnectionFactory(conn);
   }
 

+ 13 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -149,8 +149,19 @@ public class WebHdfsFileSystem extends FileSystem
         HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
         HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
 
-    connectionFactory = URLConnectionFactory
-        .newDefaultURLConnectionFactory(conf);
+    boolean isOAuth = conf.getBoolean(
+        HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_KEY,
+        HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_DEFAULT);
+
+    if(isOAuth) {
+      LOG.info("Enabling OAuth2 in WebHDFS");
+      connectionFactory = URLConnectionFactory
+          .newOAuth2URLConnectionFactory(conf);
+    } else {
+      LOG.info("Not enabling OAuth2 in WebHDFS");
+      connectionFactory = URLConnectionFactory
+          .newDefaultURLConnectionFactory(conf);
+    }
 
 
     ugi = UserGroupInformation.getCurrentUser();

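For reference, a client opting into this new code path might set the OAuth2 keys programmatically as below. This is a hedged sketch: the literal property strings for the enabled, client-id and refresh-url keys are assumptions inferred from the HdfsClientConfigKeys constants referenced above, since their values do not appear in this diff.

    // Hedged sketch; the key strings below are assumed, only the constant names appear in the diff.
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.webhdfs.oauth2.enabled", true);        // DFS_WEBHDFS_OAUTH_ENABLED_KEY (assumed value)
    conf.set("dfs.webhdfs.oauth2.client.id", "hdfs-client");    // OAUTH_CLIENT_ID_KEY (assumed value)
    conf.set("dfs.webhdfs.oauth2.refresh.url",
        "https://idp.example.com/token");                       // OAUTH_REFRESH_URL_KEY (assumed value)
    conf.set("dfs.webhdfs.oauth2.credential", "client-secret"); // read by the default credential provider
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode.example.com:50070"), conf);

With the enabled key unset or false, the constructor falls back to the plain connection factory exactly as before.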
+ 66 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenProvider.java

@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+
+import java.io.IOException;
+
+/**
+ * Provide an OAuth2 access token to be used to authenticate http calls in
+ * WebHDFS.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class AccessTokenProvider implements Configurable {
+  private Configuration conf;
+
+  /**
+   * Obtain the access token that should be added to http connection's header.
+   * Will be called for each connection, so implementations should be
+   * performant. Implementations are responsible for any refreshing of
+   * the token.
+   * 
+   * @return Access token to be added to connection header.
+   */
+  abstract String getAccessToken() throws IOException;
+
+  /**
+   * Return the conf.
+   *
+   * @return the conf.
+   */
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  /**
+   * Set the conf.
+   *
+   * @param configuration  New configuration.
+   */
+  @Override
+  public void setConf(Configuration configuration) {
+    this.conf = configuration;
+  }
+}
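Note that getAccessToken() is declared package-private in this revision, so a custom provider has to live in the org.apache.hadoop.hdfs.web.oauth2 package. A minimal, purely illustrative implementation (class name and config key are hypothetical):

    package org.apache.hadoop.hdfs.web.oauth2;

    import org.apache.hadoop.conf.Configuration;

    /** Hypothetical provider returning a pre-issued, non-expiring token. */
    public class StaticAccessTokenProvider extends AccessTokenProvider {
      private String token;

      @Override
      public void setConf(Configuration conf) {
        super.setConf(conf);
        token = conf.get("my.oauth2.static.token");  // hypothetical key
      }

      @Override
      String getAccessToken() {
        return token;  // real providers must refresh the token, per the Javadoc above
      }
    }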

+ 103 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AccessTokenTimer.java

@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.Timer;
+
+/**
+ * Access tokens generally expire.  This timer helps keep track of that.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class AccessTokenTimer {
+  public static final long EXPIRE_BUFFER_MS = 30 * 1000L;
+
+  private final Timer timer;
+
+  /**
+   * When the current access token will expire in milliseconds since
+   * epoch.
+   */
+  private long nextRefreshMSSinceEpoch;
+
+  public AccessTokenTimer() {
+    this(new Timer());
+  }
+
+  /**
+   * 
+   * @param timer Timer instance for unit testing
+   */
+  public AccessTokenTimer(Timer timer) {
+    this.timer = timer;
+    this.nextRefreshMSSinceEpoch = 0;
+  }
+
+  /** 
+   * Set when the access token will expire as reported by the oauth server,
+   * ie in seconds from now.
+   * @param expiresIn Access time expiration as reported by OAuth server
+   */
+  public void setExpiresIn(String expiresIn) {
+    this.nextRefreshMSSinceEpoch = convertExpiresIn(timer, expiresIn);
+  }
+
+  /**
+   * Set when the access token will expire in milliseconds from epoch,
+   * as required by the WebHDFS configuration.  This is a bit hacky and lame.
+   * 
+   * @param expiresInMSSinceEpoch Access time expiration in ms since epoch.
+   */
+  public void setExpiresInMSSinceEpoch(String expiresInMSSinceEpoch){
+    this.nextRefreshMSSinceEpoch = Long.parseLong(expiresInMSSinceEpoch);
+  }
+
+  /**
+   * Get next time we should refresh the token.
+   * 
+   * @return Next time since epoch we'll need to refresh the token.
+   */
+  public long getNextRefreshMSSinceEpoch() {
+    return nextRefreshMSSinceEpoch;
+  }
+  
+  /**
+   * Return true if the current token has expired or will expire within the
+   * EXPIRE_BUFFER_MS (to give ample wiggle room for the call to be made to
+   * the server).
+   */
+  public boolean shouldRefresh() {
+    long lowerLimit = nextRefreshMSSinceEpoch - EXPIRE_BUFFER_MS;
+    long currTime = timer.now();
+    return currTime > lowerLimit;
+  }
+  
+  /**
+   * The expires_in param from OAuth is in seconds-from-now.  Convert to
+   * milliseconds-from-epoch
+   */
+  static Long convertExpiresIn(Timer timer, String expiresInSecs) {
+    long expiresSecs = Long.parseLong(expiresInSecs);
+    long expiresMs = expiresSecs * 1000;
+    return timer.now() + expiresMs;
+  }
+  
+}
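The arithmetic above is straightforward: expires_in arrives as seconds-from-now, is converted to milliseconds-since-epoch, and shouldRefresh() fires EXPIRE_BUFFER_MS (30 seconds) early. A small sketch:

    // Sketch of the expiry bookkeeping using the production Timer.
    AccessTokenTimer tokenTimer = new AccessTokenTimer();
    tokenTimer.setExpiresIn("3600");                            // OAuth "expires_in": 3600 seconds from now
    long refreshAt = tokenTimer.getNextRefreshMSSinceEpoch();   // roughly now + 3,600,000 ms
    boolean refreshNow = tokenTimer.shouldRefresh();            // false until 30 s before refreshAt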

+ 62 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfCredentialBasedAccessTokenProvider.java

@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Timer;
+
+import static org.apache.hadoop.hdfs.web.oauth2.Utils.notNull;
+
+/**
+ * Obtain an access token via a credential (provided through the
+ * Configuration) using the 
+ * <a href="https://tools.ietf.org/html/rfc6749#section-4.4">
+ *   Client Credentials Grant workflow</a>.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ConfCredentialBasedAccessTokenProvider
+    extends CredentialBasedAccessTokenProvider {
+  private String credential;
+
+  public ConfCredentialBasedAccessTokenProvider() {
+  }
+
+  public ConfCredentialBasedAccessTokenProvider(Timer timer) {
+    super(timer);
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    credential = notNull(conf, OAUTH_CREDENTIAL_KEY);
+  }
+
+  @Override
+  public String getCredential() {
+    if(credential == null) {
+      throw new IllegalArgumentException("Credential has not been " +
+          "provided in configuration");
+    }
+    
+    return credential;
+  }
+}

+ 146 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java

@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import com.squareup.okhttp.OkHttpClient;
+import com.squareup.okhttp.Request;
+import com.squareup.okhttp.RequestBody;
+import com.squareup.okhttp.Response;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.util.Timer;
+import org.apache.http.HttpStatus;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_ID;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.GRANT_TYPE;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.REFRESH_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.URLENCODED;
+import static org.apache.hadoop.hdfs.web.oauth2.Utils.notNull;
+
+/**
+ * Supply an access token obtained via a refresh token (provided through the
+ * Configuration) using the second half of the
+ * <a href="https://tools.ietf.org/html/rfc6749#section-4.1">
+ *   Authorization Code Grant workflow</a>.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ConfRefreshTokenBasedAccessTokenProvider
+    extends AccessTokenProvider {
+  
+  public static final String OAUTH_REFRESH_TOKEN_KEY
+      = "dfs.webhdfs.oauth2.refresh.token";
+  public static final String OAUTH_REFRESH_TOKEN_EXPIRES_KEY
+      = "dfs.webhdfs.oauth2.refresh.token.expires.ms.since.epoch";
+
+  private AccessTokenTimer accessTokenTimer;
+  
+  private String accessToken;
+  
+  private String refreshToken;
+  
+  private String clientId;
+  
+  private String refreshURL;
+
+  
+  public ConfRefreshTokenBasedAccessTokenProvider() {
+    this.accessTokenTimer = new AccessTokenTimer();
+  }
+  
+  public ConfRefreshTokenBasedAccessTokenProvider(Timer timer) {
+    this.accessTokenTimer = new AccessTokenTimer(timer);
+  }
+  
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    refreshToken = notNull(conf, (OAUTH_REFRESH_TOKEN_KEY));
+    
+    accessTokenTimer.setExpiresInMSSinceEpoch(
+        notNull(conf, OAUTH_REFRESH_TOKEN_EXPIRES_KEY));
+
+    clientId = notNull(conf, OAUTH_CLIENT_ID_KEY);
+    refreshURL = notNull(conf, OAUTH_REFRESH_URL_KEY);
+    
+  }
+
+  @Override
+  public synchronized String getAccessToken() throws IOException {
+    if(accessTokenTimer.shouldRefresh()) {
+      refresh();
+    }
+    
+    return accessToken;
+  }
+  
+  void refresh() throws IOException {
+    try {
+      OkHttpClient client = new OkHttpClient();
+      client.setConnectTimeout(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
+          TimeUnit.MILLISECONDS);
+      client.setReadTimeout(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
+                TimeUnit.MILLISECONDS);
+
+      String bodyString = Utils.postBody(GRANT_TYPE, REFRESH_TOKEN,
+          REFRESH_TOKEN, refreshToken,
+          CLIENT_ID, clientId);
+
+      RequestBody body = RequestBody.create(URLENCODED, bodyString);
+
+      Request request = new Request.Builder()
+          .url(refreshURL)
+          .post(body)
+          .build();
+      Response responseBody = client.newCall(request).execute();
+
+      if (responseBody.code() != HttpStatus.SC_OK) {
+        throw new IllegalArgumentException("Received invalid http response: "
+            + responseBody.code() + ", text = " + responseBody.toString());
+      }
+
+      ObjectMapper mapper = new ObjectMapper();
+      Map<?, ?> response = mapper.reader(Map.class)
+          .readValue(responseBody.body().string());
+
+
+      String newExpiresIn = response.get(EXPIRES_IN).toString();
+      accessTokenTimer.setExpiresIn(newExpiresIn);
+
+      accessToken = response.get(ACCESS_TOKEN).toString();
+    } catch (Exception e) {
+      throw new IOException("Exception while refreshing access token", e);
+    }
+  }
+  
+  public String getRefreshToken() {
+    return refreshToken;
+  }
+}
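A hedged configuration sketch for the refresh-token flow. The client-id and refresh-url property strings are assumptions (only their HdfsClientConfigKeys constant names appear above), and setting the expiry to 0 simply forces a refresh on first use:

    Configuration conf = new Configuration();
    conf.set(ConfRefreshTokenBasedAccessTokenProvider.OAUTH_REFRESH_TOKEN_KEY, "<refresh token>");
    conf.set(ConfRefreshTokenBasedAccessTokenProvider.OAUTH_REFRESH_TOKEN_EXPIRES_KEY, "0");
    conf.set("dfs.webhdfs.oauth2.client.id", "hdfs-client");                     // assumed key string
    conf.set("dfs.webhdfs.oauth2.refresh.url", "https://idp.example.com/token"); // assumed key string

    ConfRefreshTokenBasedAccessTokenProvider provider =
        new ConfRefreshTokenBasedAccessTokenProvider();
    provider.setConf(conf);
    String token = provider.getAccessToken();  // POSTs to the refresh URL when the timer says so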

+ 135 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java

@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import com.squareup.okhttp.OkHttpClient;
+import com.squareup.okhttp.Request;
+import com.squareup.okhttp.RequestBody;
+import com.squareup.okhttp.Response;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.util.Timer;
+import org.apache.http.HttpStatus;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_CREDENTIALS;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_ID;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_SECRET;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.GRANT_TYPE;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.URLENCODED;
+import static org.apache.hadoop.hdfs.web.oauth2.Utils.notNull;
+
+/**
+ * Obtain an access token via the credential-based OAuth2 workflow.  This
+ * abstract class requires only that implementations provide the credential,
+ * which the class then uses to obtain an access token.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class CredentialBasedAccessTokenProvider
+    extends AccessTokenProvider {
+  public static final String OAUTH_CREDENTIAL_KEY
+      = "dfs.webhdfs.oauth2.credential";
+  
+  private AccessTokenTimer timer;
+  
+  private String clientId;
+  
+  private String refreshURL;
+  
+  private String accessToken;
+  
+  private boolean initialCredentialObtained = false;
+
+  CredentialBasedAccessTokenProvider() {
+    this.timer = new AccessTokenTimer();
+  }
+  
+  CredentialBasedAccessTokenProvider(Timer timer) {
+    this.timer = new AccessTokenTimer(timer);
+  }
+  
+  abstract String getCredential();
+
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    clientId = notNull(conf, OAUTH_CLIENT_ID_KEY);
+    refreshURL = notNull(conf, OAUTH_REFRESH_URL_KEY);
+  }
+
+  @Override
+  public synchronized String getAccessToken() throws IOException {
+    if(timer.shouldRefresh() || !initialCredentialObtained) {
+      refresh();
+      initialCredentialObtained = true;
+    }
+    
+    return accessToken;
+  }
+  
+  void refresh() throws IOException {
+    try {
+      OkHttpClient client = new OkHttpClient();
+      client.setConnectTimeout(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
+          TimeUnit.MILLISECONDS);
+      client.setReadTimeout(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
+          TimeUnit.MILLISECONDS);
+
+      String bodyString = Utils.postBody(CLIENT_SECRET, getCredential(),
+          GRANT_TYPE, CLIENT_CREDENTIALS,
+          CLIENT_ID, clientId);
+
+      RequestBody body = RequestBody.create(URLENCODED, bodyString);
+
+      Request request = new Request.Builder()
+          .url(refreshURL)
+          .post(body)
+          .build();
+      Response responseBody = client.newCall(request).execute();
+
+      if (responseBody.code() != HttpStatus.SC_OK) {
+        throw new IllegalArgumentException("Received invalid http response: "
+            + responseBody.code() + ", text = " + responseBody.toString());
+      }
+
+      ObjectMapper mapper = new ObjectMapper();
+      Map<?, ?> response = mapper.reader(Map.class)
+          .readValue(responseBody.body().string());
+      
+      String newExpiresIn = response.get(EXPIRES_IN).toString();
+      timer.setExpiresIn(newExpiresIn);
+
+      accessToken = response.get(ACCESS_TOKEN).toString();
+
+    } catch (Exception e) {
+      throw new IOException("Unable to obtain access token from credential", e);
+    }
+  }
+}
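For reference, refresh() above performs a client-credentials exchange: it posts the form body sketched below to the refresh URL and reads the JSON reply. The shape is illustrative; the field names come from OAuth2Constants.

    // Request built by refresh() via Utils.postBody(), values illustrative:
    //   POST <refresh URL>   (Content-Type: application/x-www-form-urlencoded; charset=utf-8)
    //   client_secret=<credential>&grant_type=client_credentials&client_id=<client id>
    //
    // Expected reply (must be HTTP 200; parsed with Jackson's ObjectMapper):
    //   {"access_token":"eyJ...", "expires_in":3600, "token_type":"bearer"}
    // access_token is cached and expires_in feeds AccessTokenTimer.setExpiresIn().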

+ 79 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2ConnectionConfigurator.java

@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.util.ReflectionUtils;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.ACCESS_TOKEN_PROVIDER_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.Utils.notNull;
+
+/**
+ * Configure a connection to use OAuth2 authentication.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class OAuth2ConnectionConfigurator implements ConnectionConfigurator {
+  
+  public static final String HEADER = "Bearer ";
+  
+  private final AccessTokenProvider accessTokenProvider;
+ 
+  private ConnectionConfigurator sslConfigurator = null;
+  
+  public OAuth2ConnectionConfigurator(Configuration conf) {
+    this(conf, null);
+  }
+  
+  @SuppressWarnings("unchecked")
+  public OAuth2ConnectionConfigurator(Configuration conf,
+                                      ConnectionConfigurator sslConfigurator) {
+    this.sslConfigurator = sslConfigurator;
+    
+    notNull(conf, ACCESS_TOKEN_PROVIDER_KEY);
+    
+    Class accessTokenProviderClass = conf.getClass(ACCESS_TOKEN_PROVIDER_KEY,
+        ConfCredentialBasedAccessTokenProvider.class,
+        AccessTokenProvider.class);
+    
+    accessTokenProvider = (AccessTokenProvider) ReflectionUtils
+        .newInstance(accessTokenProviderClass, conf);
+    accessTokenProvider.setConf(conf);
+  }
+  
+  @Override
+  public HttpURLConnection configure(HttpURLConnection conn)
+      throws IOException {
+    if(sslConfigurator != null) {
+      sslConfigurator.configure(conn);
+    }
+    
+    String accessToken = accessTokenProvider.getAccessToken();
+    
+    conn.setRequestProperty("AUTHORIZATION", HEADER + accessToken);
+    
+    return conn;
+  }
+}
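A hedged usage sketch: the configurator first applies the optional SSL configurator, then stamps the bearer token onto the connection (host and path below are illustrative):

    // Illustrative wiring; conf carries ACCESS_TOKEN_PROVIDER_KEY plus the chosen provider's own keys.
    OAuth2ConnectionConfigurator oauth = new OAuth2ConnectionConfigurator(conf);
    HttpURLConnection conn = (HttpURLConnection) new URL(
        "http://namenode.example.com:50070/webhdfs/v1/tmp?op=LISTSTATUS").openConnection();
    conn = oauth.configure(conn);  // adds "AUTHORIZATION: Bearer <access token>"

The header is set with the uppercase name AUTHORIZATION; HTTP header names are case-insensitive, so this is equivalent to the usual Authorization header.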

+ 46 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/OAuth2Constants.java

@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import com.squareup.okhttp.MediaType;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Sundry constants relating to OAuth2 within WebHDFS.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class OAuth2Constants {
+  private OAuth2Constants() { /** Private constructor. **/ }
+
+  public static final MediaType URLENCODED
+      = MediaType.parse("application/x-www-form-urlencoded; charset=utf-8");
+  
+  /* Constants for OAuth protocol */ 
+  public static final String ACCESS_TOKEN = "access_token";
+  public static final String BEARER = "bearer";
+  public static final String CLIENT_CREDENTIALS = "client_credentials";
+  public static final String CLIENT_ID = "client_id";
+  public static final String CLIENT_SECRET = "client_secret";
+  public static final String EXPIRES_IN = "expires_in";
+  public static final String GRANT_TYPE = "grant_type";
+  public static final String REFRESH_TOKEN = "refresh_token";
+  public static final String TOKEN_TYPE = "token_type";
+}

+ 63 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/Utils.java

@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+final class Utils {
+  private Utils() { /* Private constructor */ }
+
+  public static String notNull(Configuration conf, String key) {
+    String value = conf.get(key);
+
+    if(value == null) {
+      throw new IllegalArgumentException("No value for " + key +
+          " found in conf file.");
+    }
+
+    return value;
+  }
+  
+  public static String postBody(String ... kv)
+      throws UnsupportedEncodingException {
+    if(kv.length % 2 != 0) {
+      throw new IllegalArgumentException("Arguments must be key value pairs");
+    }
+    StringBuilder sb = new StringBuilder();
+    int i = 0;
+    
+    while(i < kv.length) {
+      if(i > 0) {
+        sb.append("&");
+      }
+      sb.append(URLEncoder.encode(kv[i++], "UTF-8"));
+      sb.append("=");
+      sb.append(URLEncoder.encode(kv[i++], "UTF-8"));
+    }
+    
+    return sb.toString();
+  }
+}
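postBody() simply URL-encodes alternating key/value arguments into a form body, for example:

    String body = Utils.postBody(
        "grant_type", "refresh_token",
        "refresh_token", "abc 123",
        "client_id", "hdfs-client");
    // body is "grant_type=refresh_token&refresh_token=abc+123&client_id=hdfs-client"
    // (URLEncoder encodes the space as '+'); an odd number of arguments throws.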

+ 26 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/package-info.java

@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * OAuth2-based WebHDFS authentication.
+ */
+@InterfaceAudience.Public
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.classification.InterfaceAudience;

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -357,6 +357,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8131. Implement a space balanced block placement policy (Liu Shaohui
     via kihwal)
 
+    HDFS-8155. Support OAuth2 in WebHDFS. (jghoman)
+
   IMPROVEMENTS
 
     HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value
@@ -855,6 +857,23 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8865. Improve quota initialization performance. (kihwal)
 
+    HDFS-8938. Extract BlockToMarkCorrupt and ReplicationWork as standalone
+    classes from BlockManager. (Mingliang Liu via wheat9)
+
+    HDFS-8925. Move BlockReaderLocal to hdfs-client.
+    (Mingliang Liu via wheat9)
+
+    HDFS-8983. NameNode support for protected directories. (Arpit Agarwal)
+
+    HDFS-8980. Remove unnecessary block replacement in INodeFile. (jing9)
+
+    HDFS-8990. Move RemoteBlockReader to hdfs-client module.
+    (Mingliang via wheat9)
+
+    HDFS-8946. Improve choosing datanode storage for block placement. (yliu)
+
+    HDFS-8965. Harden edit log reading code against out of memory errors (cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -1248,6 +1267,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8963. Fix incorrect sign extension of xattr length in HDFS-8900.
     (Colin Patrick McCabe via yliu)
 
+    HDFS-8950. NameNode refresh doesn't remove DataNodes that are no longer in
+    the allowed list (Daniel Templeton)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -213,6 +213,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>leveldbjni-all</artifactId>
       <version>1.8</version>
     </dependency>
+    <dependency>
+      <groupId>org.mock-server</groupId>
+      <artifactId>mockserver-netty</artifactId>
+      <version>3.9.2</version>
+      <scope>test</scope>
+    </dependency>
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.bouncycastle</groupId>

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java

@@ -83,7 +83,7 @@ class BookKeeperEditLogInputStream extends EditLogInputStream {
     tracker = new FSEditLogLoader.PositionTrackingInputStream(bin);
     DataInputStream in = new DataInputStream(tracker);
 
-    reader = new FSEditLogOp.Reader(in, tracker, logVersion);
+    reader = FSEditLogOp.Reader.create(in, tracker, logVersion);
   }
 
   @Override

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -205,7 +205,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     DataEncryptionKeyFactory {
   public static final Log LOG = LogFactory.getLog(DFSClient.class);
   public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
-  static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
 
   private final Configuration conf;
   private final DfsClientConf dfsClientConf;

+ 10 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -81,8 +81,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long    DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
   public static final String  DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY = "dfs.datanode.balance.max.concurrent.moves";
   public static final int     DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT = 5;
-  public static final String  DFS_DATANODE_READAHEAD_BYTES_KEY = "dfs.datanode.readahead.bytes";
-  public static final long    DFS_DATANODE_READAHEAD_BYTES_DEFAULT = 4 * 1024 * 1024; // 4MB
+  @Deprecated
+  public static final String  DFS_DATANODE_READAHEAD_BYTES_KEY =
+      HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY;
+  @Deprecated
+  public static final long    DFS_DATANODE_READAHEAD_BYTES_DEFAULT =
+      HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT;
   public static final String  DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY = "dfs.datanode.drop.cache.behind.writes";
   public static final boolean DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_DEFAULT = false;
   public static final String  DFS_DATANODE_SYNC_BEHIND_WRITES_KEY = "dfs.datanode.sync.behind.writes";
@@ -516,7 +520,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_WEB_UGI_KEY = "dfs.web.ugi";
   public static final String  DFS_NAMENODE_STARTUP_KEY = "dfs.namenode.startup";
   public static final String  DFS_DATANODE_KEYTAB_FILE_KEY = "dfs.datanode.keytab.file";
-  public static final String  DFS_DATANODE_KERBEROS_PRINCIPAL_KEY = "dfs.datanode.kerberos.principal";
+  public static final String  DFS_DATANODE_KERBEROS_PRINCIPAL_KEY =
+      HdfsClientConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
   @Deprecated
   public static final String  DFS_DATANODE_USER_NAME_KEY = DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
   public static final String  DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS = "dfs.datanode.shared.file.descriptor.paths";
@@ -615,7 +620,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class";
   public static final int    DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT = 100;
   public static final String DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES = "dfs.namenode.list.encryption.zones.num.responses";
-  public static final String DFS_ENCRYPTION_KEY_PROVIDER_URI = "dfs.encryption.key.provider.uri";
+  public static final String DFS_ENCRYPTION_KEY_PROVIDER_URI =
+      HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI;
 
   // Journal-node related configs. These are read on the JN side.
   public static final String  DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir";

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -363,7 +363,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
       ClientDatanodeProtocol cdp = null;
       
       try {
-        cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode,
+        cdp = DFSUtilClient.createClientDatanodeProtocolProxy(datanode,
             dfsClient.getConfiguration(), conf.getSocketTimeout(),
             conf.isConnectToDnViaHostname(), locatedblock);
         

+ 2 - 66
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -54,8 +54,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 
-import javax.net.SocketFactory;
-
+import com.google.common.collect.Sets;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.Option;
@@ -69,16 +68,11 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
-import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.http.HttpConfig;
@@ -933,29 +927,6 @@ public class DFSUtil {
   public static int roundBytesToGB(long bytes) {
     return Math.round((float)bytes/ 1024 / 1024 / 1024);
   }
-  
-  /** Create a {@link ClientDatanodeProtocol} proxy */
-  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
-      DatanodeID datanodeid, Configuration conf, int socketTimeout,
-      boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
-    return new ClientDatanodeProtocolTranslatorPB(datanodeid, conf, socketTimeout,
-        connectToDnViaHostname, locatedBlock);
-  }
-  
-  /** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */
-  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
-      DatanodeID datanodeid, Configuration conf, int socketTimeout,
-      boolean connectToDnViaHostname) throws IOException {
-    return new ClientDatanodeProtocolTranslatorPB(
-        datanodeid, conf, socketTimeout, connectToDnViaHostname);
-  }
-  
-  /** Create a {@link ClientDatanodeProtocol} proxy */
-  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
-      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
-      SocketFactory factory) throws IOException {
-    return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory);
-  }
 
   /**
    * Get nameservice Id for the {@link NameNode} based on namenode RPC address
@@ -1450,41 +1421,6 @@ public class DFSUtil {
     }
   }
 
-  /**
-   * Creates a new KeyProvider from the given Configuration.
-   *
-   * @param conf Configuration
-   * @return new KeyProvider, or null if no provider was found.
-   * @throws IOException if the KeyProvider is improperly specified in
-   *                             the Configuration
-   */
-  public static KeyProvider createKeyProvider(
-      final Configuration conf) throws IOException {
-    final String providerUriStr =
-        conf.getTrimmed(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "");
-    // No provider set in conf
-    if (providerUriStr.isEmpty()) {
-      return null;
-    }
-    final URI providerUri;
-    try {
-      providerUri = new URI(providerUriStr);
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-    KeyProvider keyProvider = KeyProviderFactory.get(providerUri, conf);
-    if (keyProvider == null) {
-      throw new IOException("Could not instantiate KeyProvider from " + 
-          DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI + " setting of '" + 
-          providerUriStr +"'");
-    }
-    if (keyProvider.isTransient()) {
-      throw new IOException("KeyProvider " + keyProvider.toString()
-          + " was found but it is a transient provider.");
-    }
-    return keyProvider;
-  }
-
   /**
    * Creates a new KeyProviderCryptoExtension by wrapping the
    * KeyProvider specified in the given Configuration.
@@ -1496,7 +1432,7 @@
    */
   public static KeyProviderCryptoExtension createKeyProviderCryptoExtension(
       final Configuration conf) throws IOException {
-    KeyProvider keyProvider = createKeyProvider(conf);
+    KeyProvider keyProvider = DFSUtilClient.createKeyProvider(conf);
     if (keyProvider == null) {
       return null;
     }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java

@@ -87,7 +87,7 @@ public class LayoutVersion {
     FSIMAGE_COMPRESSION(-25, "Support for fsimage compression"),
     FSIMAGE_CHECKSUM(-26, "Support checksum for fsimage"),
     REMOVE_REL13_DISK_LAYOUT_SUPPORT(-27, "Remove support for 0.13 disk layout"),
-    EDITS_CHESKUM(-28, "Support checksum for editlog"),
+    EDITS_CHECKSUM(-28, "Support checksum for editlog"),
     UNUSED(-29, "Skipped version"),
     FSIMAGE_NAME_OPTIMIZATION(-30, "Store only last part of path in fsimage"),
     RESERVED_REL20_203(-31, -19, "Reserved for release 0.20.203", true,

+ 7 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java

@@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto;
@@ -115,7 +114,7 @@ public abstract class Receiver implements DataTransferProtocol {
     TraceScope traceScope = continueTraceSpan(proto.getHeader(),
         proto.getClass().getSimpleName());
     try {
-      readBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
+      readBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        proto.getOffset(),
@@ -136,7 +135,7 @@ public abstract class Receiver implements DataTransferProtocol {
     TraceScope traceScope = continueTraceSpan(proto.getHeader(),
         proto.getClass().getSimpleName());
     try {
-      writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
+      writeBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
          PBHelperClient.convertStorageType(proto.getStorageType()),
          PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
          proto.getHeader().getClientName(),
@@ -167,7 +166,7 @@ public abstract class Receiver implements DataTransferProtocol {
     TraceScope traceScope = continueTraceSpan(proto.getHeader(),
         proto.getClass().getSimpleName());
     try {
-      transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
+      transferBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
          PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
          proto.getHeader().getClientName(),
          targets,
@@ -186,7 +185,7 @@ public abstract class Receiver implements DataTransferProtocol {
     TraceScope traceScope = continueTraceSpan(proto.getHeader(),
         proto.getClass().getSimpleName());
     try {
-      requestShortCircuitFds(PBHelper.convert(proto.getHeader().getBlock()),
+      requestShortCircuitFds(PBHelperClient.convert(proto.getHeader().getBlock()),
          PBHelper.convert(proto.getHeader().getToken()),
          slotId, proto.getMaxVersion(),
          proto.getSupportsReceiptVerification());
@@ -228,7 +227,7 @@ public abstract class Receiver implements DataTransferProtocol {
     TraceScope traceScope = continueTraceSpan(proto.getHeader(),
         proto.getClass().getSimpleName());
     try {
-      replaceBlock(PBHelper.convert(proto.getHeader().getBlock()),
+      replaceBlock(PBHelperClient.convert(proto.getHeader().getBlock()),
          PBHelperClient.convertStorageType(proto.getStorageType()),
          PBHelper.convert(proto.getHeader().getToken()),
          proto.getDelHint(),
@@ -244,7 +243,7 @@ public abstract class Receiver implements DataTransferProtocol {
     TraceScope traceScope = continueTraceSpan(proto.getHeader(),
         proto.getClass().getSimpleName());
     try {
-      copyBlock(PBHelper.convert(proto.getHeader().getBlock()),
+      copyBlock(PBHelperClient.convert(proto.getHeader().getBlock()),
          PBHelper.convert(proto.getHeader().getToken()));
     } finally {
       if (traceScope != null) traceScope.close();
@@ -257,7 +256,7 @@ public abstract class Receiver implements DataTransferProtocol {
     TraceScope traceScope = continueTraceSpan(proto.getHeader(),
         proto.getClass().getSimpleName());
     try {
-    blockChecksum(PBHelper.convert(proto.getHeader().getBlock()),
+    blockChecksum(PBHelperClient.convert(proto.getHeader().getBlock()),
        PBHelper.convert(proto.getHeader().getToken()));
     } finally {
       if (traceScope != null) traceScope.close();
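All of the block-argument call sites above now resolve to PBHelperClient. The conversion itself is unchanged; it is presumably the helper removed from PBHelper.java further down in this diff, i.e. roughly:

    // Sketch only: ExtendedBlock conversion as relocated to the hdfs-client side.
    // The body is taken from the method deleted from PBHelper.java below; the
    // exact destination class is an assumption based on these call sites.
    public static ExtendedBlock convert(ExtendedBlockProto eb) {
      if (eb == null) {
        return null;
      }
      return new ExtendedBlock(eb.getPoolId(), eb.getBlockId(), eb.getNumBytes(),
          eb.getGenerationStamp());
    }

Token conversion still goes through PBHelper in this commit, which is why the two helpers appear side by side in each opcode handler.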

+ 2 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java

@@ -18,8 +18,6 @@
 package org.apache.hadoop.hdfs.protocolPB;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.Map;
 
 import com.google.common.base.Optional;
@@ -86,7 +84,7 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
       throws ServiceException {
     long len;
     try {
-      len = impl.getReplicaVisibleLength(PBHelper.convert(request.getBlock()));
+      len = impl.getReplicaVisibleLength(PBHelperClient.convert(request.getBlock()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -123,7 +121,7 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
       throws ServiceException {
     BlockLocalPathInfo resp;
     try {
-      resp = impl.getBlockLocalPathInfo(PBHelper.convert(request.getBlock()), PBHelper.convert(request.getToken()));
+      resp = impl.getBlockLocalPathInfo(PBHelperClient.convert(request.getBlock()), PBHelper.convert(request.getToken()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -482,7 +482,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public AbandonBlockResponseProto abandonBlock(RpcController controller,
       AbandonBlockRequestProto req) throws ServiceException {
     try {
-      server.abandonBlock(PBHelper.convert(req.getB()), req.getFileId(),
+      server.abandonBlock(PBHelperClient.convert(req.getB()), req.getFileId(),
           req.getSrc(), req.getHolder());
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -500,7 +500,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       LocatedBlock result = server.addBlock(
           req.getSrc(),
           req.getClientName(),
-          req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null,
+          req.hasPrevious() ? PBHelperClient.convert(req.getPrevious()) : null,
           (excl == null || excl.size() == 0) ? null : PBHelper.convert(excl
               .toArray(new DatanodeInfoProto[excl.size()])), req.getFileId(),
           (favor == null || favor.size() == 0) ? null : favor
@@ -521,7 +521,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       List<String> existingStorageIDsList = req.getExistingStorageUuidsList();
       List<DatanodeInfoProto> excludesList = req.getExcludesList();
       LocatedBlock result = server.getAdditionalDatanode(req.getSrc(),
-          req.getFileId(), PBHelper.convert(req.getBlk()),
+          req.getFileId(), PBHelperClient.convert(req.getBlk()),
           PBHelper.convert(existingList.toArray(
               new DatanodeInfoProto[existingList.size()])),
           existingStorageIDsList.toArray(
@@ -543,7 +543,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     try {
       boolean result = 
           server.complete(req.getSrc(), req.getClientName(),
-          req.hasLast() ? PBHelper.convert(req.getLast()) : null,
+          req.hasLast() ? PBHelperClient.convert(req.getLast()) : null,
           req.hasFileId() ? req.getFileId() : HdfsConstants.GRANDFATHER_INODE_ID);
       return CompleteResponseProto.newBuilder().setResult(result).build();
     } catch (IOException e) {
@@ -962,7 +962,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       throws ServiceException {
     try {
       LocatedBlockProto result = PBHelper.convertLocatedBlock(
-          server.updateBlockForPipeline(PBHelper.convert(req.getBlock()),
+          server.updateBlockForPipeline(PBHelperClient.convert(req.getBlock()),
               req.getClientName()));
       return UpdateBlockForPipelineResponseProto.newBuilder().setBlock(result)
           .build();
@@ -978,8 +978,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       List<DatanodeIDProto> newNodes = req.getNewNodesList();
       List<String> newStorageIDs = req.getStorageIDsList();
       server.updatePipeline(req.getClientName(),
-          PBHelper.convert(req.getOldBlock()),
-          PBHelper.convert(req.getNewBlock()),
+          PBHelperClient.convert(req.getOldBlock()),
+          PBHelperClient.convert(req.getNewBlock()),
           PBHelper.convert(newNodes.toArray(new DatanodeIDProto[newNodes.size()])),
           newStorageIDs.toArray(new String[newStorageIDs.size()]));
       return VOID_UPDATEPIPELINE_RESPONSE;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java

@@ -281,7 +281,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
     final List<String> sidprotos = request.getNewTargetStoragesList();
     final String[] storageIDs = sidprotos.toArray(new String[sidprotos.size()]);
     try {
-      impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
+      impl.commitBlockSynchronization(PBHelperClient.convert(request.getBlock()),
          request.getNewGenStamp(), request.getNewLength(),
          request.getCloseFile(), request.getDeleteBlock(), dns, storageIDs);
     } catch (IOException e) {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java

@@ -76,7 +76,7 @@ public class InterDatanodeProtocolServerSideTranslatorPB implements
     final String storageID;
     try {
       storageID = impl.updateReplicaUnderRecovery(
-          PBHelper.convert(request.getBlock()), request.getRecoveryId(),
+          PBHelperClient.convert(request.getBlock()), request.getRecoveryId(),
           request.getNewBlockId(), request.getNewLength());
     } catch (IOException e) {
       throw new ServiceException(e);

+ 7 - 26
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -23,9 +23,7 @@ import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
 import static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto;
 import static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto;
 
-import java.io.EOFException;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -118,7 +116,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsS
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
@@ -141,6 +138,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
@@ -159,7 +157,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
@@ -233,21 +230,17 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
-import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
-import org.apache.hadoop.hdfs.util.ExactSizeInputStream;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.DataChecksum;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.primitives.Shorts;
 import com.google.protobuf.ByteString;
-import com.google.protobuf.CodedInputStream;
 
 /**
  * Utilities for converting protobuf classes to and from implementation classes
@@ -600,13 +593,7 @@ public class PBHelper {
       return new NamenodeCommand(cmd.getAction());
     }
   }
-  
-  public static ExtendedBlock convert(ExtendedBlockProto eb) {
-    if (eb == null) return null;
-    return new ExtendedBlock( eb.getPoolId(),  eb.getBlockId(),   eb.getNumBytes(),
-       eb.getGenerationStamp());
-  }
-  
+
   public static RecoveringBlockProto convert(RecoveringBlock b) {
     if (b == null) {
       return null;
@@ -620,7 +607,7 @@
   }
 
   public static RecoveringBlock convert(RecoveringBlockProto b) {
-    ExtendedBlock block = convert(b.getBlock().getB());
+    ExtendedBlock block = PBHelperClient.convert(b.getBlock().getB());
     DatanodeInfo[] locs = convert(b.getBlock().getLocsList());
     return (b.hasTruncateBlock()) ?
         new RecoveringBlock(block, locs, PBHelper.convert(b.getTruncateBlock())) :
@@ -786,11 +773,11 @@
 
     final LocatedBlock lb;
     if (indices == null) {
-      lb = new LocatedBlock(PBHelper.convert(proto.getB()), targets, storageIDs,
-          storageTypes, proto.getOffset(), proto.getCorrupt(),
+      lb = new LocatedBlock(PBHelperClient.convert(proto.getB()), targets,
+          storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(),
          cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
     } else {
-      lb = new LocatedStripedBlock(PBHelper.convert(proto.getB()), targets,
+      lb = new LocatedStripedBlock(PBHelperClient.convert(proto.getB()), targets,
          storageIDs, storageTypes, indices, proto.getOffset(),
          proto.getCorrupt(),
          cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
@@ -2193,12 +2180,6 @@
     return builder.build();
   }
 
-  public static DatanodeLocalInfo convert(DatanodeLocalInfoProto proto) {
-    return new DatanodeLocalInfo(proto.getSoftwareVersion(),
-        proto.getConfigVersion(), proto.getUptime());
-  }
-
-
   private static AclEntryScopeProto convert(AclEntryScope v) {
     return AclEntryScopeProto.valueOf(v.ordinal());
   }
@@ -3011,7 +2992,7 @@
   public static BlockECRecoveryInfo convertBlockECRecoveryInfo(
       BlockECRecoveryInfoProto blockEcRecoveryInfoProto) {
     ExtendedBlockProto blockProto = blockEcRecoveryInfoProto.getBlock();
-    ExtendedBlock block = convert(blockProto);
+    ExtendedBlock block = PBHelperClient.convert(blockProto);
 
     DatanodeInfosProto sourceDnInfosProto = blockEcRecoveryInfoProto
         .getSourceDnInfos();
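The two converters deleted from PBHelper above are client-side concerns; the ExtendedBlock one is clearly replaced by PBHelperClient.convert (see the updated call sites in this file), and the DatanodeLocalInfo one presumably moves the same way, although no call site for it appears in this excerpt. A sketch of the relocated DatanodeLocalInfo helper, assuming it keeps the removed body verbatim:

    // Sketch only: same body as the method deleted above, assumed to now live
    // in PBHelperClient rather than PBHelper.
    public static DatanodeLocalInfo convert(DatanodeLocalInfoProto proto) {
      return new DatanodeLocalInfo(proto.getSoftwareVersion(),
          proto.getConfigVersion(), proto.getUptime());
    }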

+ 4 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java

@@ -22,6 +22,7 @@ import java.util.LinkedList;
 import java.util.List;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -35,6 +36,7 @@ import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
  * where the replicas of the block, or blocks belonging to the erasure coding
  * block group, are stored.
  */
+@InterfaceAudience.Private
 public abstract class BlockInfo extends Block
     implements LightWeightGSet.LinkedElement {
 
@@ -203,12 +205,6 @@ public abstract class BlockInfo extends Block
    */
   abstract boolean removeStorage(DatanodeStorageInfo storage);
 
-  /**
-   * Replace the current BlockInfo with the new one in corresponding
-   * DatanodeStorageInfo's linked list
-   */
-  abstract void replaceBlock(BlockInfo newBlock);
-
   public abstract boolean isStriped();
 
   /** @return true if there is no datanode storage associated with the block */
@@ -375,19 +371,12 @@ public abstract class BlockInfo extends Block
   }
 
   /**
-   * Convert an under construction block to a complete block.
-   *
-   * @return BlockInfo - a complete block.
-   * @throws IOException if the state of the block
-   * (the generation stamp and the length) has not been committed by
-   * the client or it does not have at least a minimal number of replicas
-   * reported from data-nodes.
+   * Convert an under construction block to complete.
   */
-  BlockInfo convertToCompleteBlock() throws IOException {
+  void convertToCompleteBlock() {
     assert getBlockUCState() != BlockUCState.COMPLETE :
         "Trying to convert a COMPLETE block";
     uc = null;
-    return this;
   }
 
   /**
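The signature change above ripples into BlockManager (see its hunks later in this diff): completion now mutates the existing BlockInfo instead of producing a replacement object, so callers no longer reassign the variable or re-register the block. A minimal sketch of the new calling pattern, with hypothetical surrounding names:

    // Sketch, not the actual BlockManager code: completion happens in place.
    BlockInfo last = bc.getLastBlock();        // bc: some BlockCollection
    if (last.getBlockUCState() == BlockUCState.COMMITTED) {
      last.convertToCompleteBlock();           // returns void, clears the UC state
    }
    // 'last' is still the instance held by the blocksMap, so the old
    // bc.setBlock(...) / replaceBlock(...) bookkeeping is no longer needed,
    // which is why replaceBlock() could be removed above.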

+ 0 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java

@@ -96,21 +96,6 @@ public class BlockInfoContiguous extends BlockInfo {
     return 0;
   }
 
-  @Override
-  void replaceBlock(BlockInfo newBlock) {
-    assert newBlock instanceof BlockInfoContiguous;
-    for (int i = this.numNodes() - 1; i >= 0; i--) {
-      final DatanodeStorageInfo storage = this.getStorageInfo(i);
-      final boolean removed = storage.removeBlock(this);
-      assert removed : "currentBlock not found.";
-
-      final DatanodeStorageInfo.AddBlockResult result = storage.addBlock(
-          newBlock, newBlock);
-      assert result == DatanodeStorageInfo.AddBlockResult.ADDED :
-          "newBlock already exists.";
-    }
-  }
-
   @Override
   public final boolean isStriped() {
     return false;

+ 0 - 19
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java

@@ -194,25 +194,6 @@ public class BlockInfoStriped extends BlockInfo {
     }
   }
 
-  @Override
-  void replaceBlock(BlockInfo newBlock) {
-    assert newBlock instanceof BlockInfoStriped;
-    BlockInfoStriped newBlockGroup = (BlockInfoStriped) newBlock;
-    final int size = getCapacity();
-    newBlockGroup.ensureCapacity(size, false);
-    for (int i = 0; i < size; i++) {
-      final DatanodeStorageInfo storage = this.getStorageInfo(i);
-      if (storage != null) {
-        final int blockIndex = indices[i];
-        final boolean removed = storage.removeBlock(this);
-        assert removed : "currentBlock not found.";
-
-        newBlockGroup.addStorage(storage, i, blockIndex);
-        storage.insertToList(newBlockGroup);
-      }
-    }
-  }
-
   public long spaceConsumed() {
     // In case striped blocks, total usage by this striped blocks should
     // be the total of data blocks and parity blocks because

+ 197 - 355
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -674,43 +674,34 @@ public class BlockManager implements BlockStatsMXBean {
       return false; // already completed (e.g. by syncBlock)
     
     final boolean b = commitBlock(lastBlock, commitBlock);
-    if (hasMinStorage(lastBlock)) {
-      completeBlock(bc, bc.numBlocks() - 1, false);
+      if (hasMinStorage(lastBlock)) {
+      completeBlock(lastBlock, false);
     }
     return b;
   }
 
   /**
    * Convert a specified block of the file to a complete block.
-   * @param bc file
-   * @param blkIndex  block index in the file
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  private BlockInfo completeBlock(final BlockCollection bc,
-      final int blkIndex, boolean force) throws IOException {
-    if (blkIndex < 0) {
-      return null;
-    }
-    BlockInfo curBlock = bc.getBlocks()[blkIndex];
+  private void completeBlock(BlockInfo curBlock, boolean force)
+      throws IOException {
     if (curBlock.isComplete()) {
-      return curBlock;
+      return;
     }
 
     int numNodes = curBlock.numNodes();
     if (!force && !hasMinStorage(curBlock, numNodes)) {
-      throw new IOException("Cannot complete block: " +
-          "block does not satisfy minimal replication requirement.");
+      throw new IOException("Cannot complete block: "
+          + "block does not satisfy minimal replication requirement.");
     }
     if (!force && curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
       throw new IOException(
           "Cannot complete block: block has not been COMMITTED by the client");
     }
 
-    final BlockInfo completeBlock = curBlock.convertToCompleteBlock();
-    // replace penultimate block in file
-    bc.setBlock(blkIndex, completeBlock);
-    
+    curBlock.convertToCompleteBlock();
     // Since safe-mode only counts complete blocks, and we now have
     // one more complete block, we need to adjust the total up, and
     // also count it as safe, if we have at least the minimum replica
@@ -722,34 +713,18 @@ public class BlockManager implements BlockStatsMXBean {
         ((BlockInfoStriped) curBlock).getRealDataBlockNum() : minReplication;
     namesystem.incrementSafeBlockCount(
         Math.min(numNodes, minStorage), curBlock);
-    
-    // replace block in the blocksMap
-    return blocksMap.replaceBlock(completeBlock);
   }
 
-  private BlockInfo completeBlock(final BlockCollection bc,
-      final BlockInfo block, boolean force) throws IOException {
-    BlockInfo[] fileBlocks = bc.getBlocks();
-    for (int idx = 0; idx < fileBlocks.length; idx++) {
-      if (fileBlocks[idx] == block) {
-        return completeBlock(bc, idx, force);
-      }
-    }
-    return block;
-  }
-  
   /**
    * Force the given block in the given file to be marked as complete,
    * regardless of whether enough replicas are present. This is necessary
    * when tailing edit logs as a Standby.
    */
-  public BlockInfo forceCompleteBlock(final BlockCollection bc,
-      final BlockInfo block) throws IOException {
+  public void forceCompleteBlock(final BlockInfo block) throws IOException {
     block.commitBlock(block);
-    return completeBlock(bc, block, true);
+    completeBlock(block, true);
   }
 
-  
   /**
    * Convert the last block of the file to an under construction block.<p>
    * The block is converted only if the file has blocks and the last one
@@ -1270,42 +1245,41 @@ public class BlockManager implements BlockStatsMXBean {
   private void markBlockAsCorrupt(BlockToMarkCorrupt b,
       DatanodeStorageInfo storageInfo,
       DatanodeDescriptor node) throws IOException {
-
-    if (b.stored.isDeleted()) {
+    if (b.getStored().isDeleted()) {
       blockLog.debug("BLOCK markBlockAsCorrupt: {} cannot be marked as" +
           " corrupt as it does not belong to any file", b);
-      addToInvalidates(b.corrupted, node);
+      addToInvalidates(b.getCorrupted(), node);
       return;
     }
     short expectedReplicas =
-        getExpectedReplicaNum(b.stored);
+        getExpectedReplicaNum(b.getStored());
 
     // Add replica to the data-node if it is not already there
     if (storageInfo != null) {
-      storageInfo.addBlock(b.stored, b.corrupted);
+      storageInfo.addBlock(b.getStored(), b.getCorrupted());
     }
 
     // Add this replica to corruptReplicas Map. For striped blocks, we always
     // use the id of whole striped block group when adding to corruptReplicas
-    Block corrupted = new Block(b.corrupted);
-    if (b.stored.isStriped()) {
-      corrupted.setBlockId(b.stored.getBlockId());
+    Block corrupted = new Block(b.getCorrupted());
+    if (b.getStored().isStriped()) {
+      corrupted.setBlockId(b.getStored().getBlockId());
     }
-    corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.reason,
-        b.reasonCode);
+    corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.getReason(),
+        b.getReasonCode());
 
-    NumberReplicas numberOfReplicas = countNodes(b.stored);
+    NumberReplicas numberOfReplicas = countNodes(b.getStored());
     boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=
         expectedReplicas;
 
-    boolean minReplicationSatisfied = hasMinStorage(b.stored,
+    boolean minReplicationSatisfied = hasMinStorage(b.getStored(),
         numberOfReplicas.liveReplicas());
 
     boolean hasMoreCorruptReplicas = minReplicationSatisfied &&
         (numberOfReplicas.liveReplicas() + numberOfReplicas.corruptReplicas()) >
         expectedReplicas;
     boolean corruptedDuringWrite = minReplicationSatisfied &&
-        (b.stored.getGenerationStamp() > b.corrupted.getGenerationStamp());
+        b.isCorruptedDuringWrite();
     // case 1: have enough number of live replicas
     // case 2: corrupted replicas + live replicas > Replication factor
     // case 3: Block is marked corrupt due to failure while writing. In this
@@ -1318,7 +1292,7 @@ public class BlockManager implements BlockStatsMXBean {
       invalidateBlock(b, node, numberOfReplicas);
     } else if (namesystem.isPopulatingReplQueues()) {
       // add the block to neededReplication
-      updateNeededReplications(b.stored, -1, 0);
+      updateNeededReplications(b.getStored(), -1, 0);
     }
   }
 
@@ -1342,13 +1316,13 @@ public class BlockManager implements BlockStatsMXBean {
           "invalidation of {} on {} because {} replica(s) are located on " +
           "invalidation of {} on {} because {} replica(s) are located on " +
           "nodes with potentially out-of-date block reports", b, dn,
           "nodes with potentially out-of-date block reports", b, dn,
           nr.replicasOnStaleNodes());
           nr.replicasOnStaleNodes());
-      postponeBlock(b.corrupted);
+      postponeBlock(b.getCorrupted());
       return false;
       return false;
     } else {
     } else {
       // we already checked the number of replicas in the caller of this
       // we already checked the number of replicas in the caller of this
       // function and know there are enough live replicas, so we can delete it.
       // function and know there are enough live replicas, so we can delete it.
-      addToInvalidates(b.corrupted, dn);
-      removeStoredBlock(b.stored, node);
+      addToInvalidates(b.getCorrupted(), dn);
+      removeStoredBlock(b.getStored(), node);
       blockLog.debug("BLOCK* invalidateBlocks: {} on {} listed for deletion.",
       blockLog.debug("BLOCK* invalidateBlocks: {} on {} listed for deletion.",
           b, dn);
           b, dn);
       return true;
       return true;
@@ -1448,70 +1422,9 @@ public class BlockManager implements BlockStatsMXBean {
       synchronized (neededReplications) {
         for (int priority = 0; priority < blocksToRecover.size(); priority++) {
           for (BlockInfo block : blocksToRecover.get(priority)) {
-            // block should belong to a file
-            bc = getBlockCollection(block);
-            // abandoned block or block reopened for append
-            if (bc == null
-                || (bc.isUnderConstruction() && block.equals(bc.getLastBlock()))) {
-              // remove from neededReplications
-              neededReplications.remove(block, priority);
-              continue;
-            }
-
-            requiredReplication = getExpectedReplicaNum(block);
-
-            // get a source data-node
-            containingNodes = new ArrayList<>();
-            List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<>();
-            NumberReplicas numReplicas = new NumberReplicas();
-            List<Short> liveBlockIndices = new ArrayList<>();
-            final DatanodeDescriptor[] srcNodes = chooseSourceDatanodes(block,
-                containingNodes, liveReplicaNodes, numReplicas,
-                liveBlockIndices, priority);
-            if(srcNodes == null || srcNodes.length == 0) {
-              // block can not be replicated from any node
-              LOG.debug("Block " + block + " cannot be recovered " +
-                  "from any node");
-              continue;
-            }
-
-            // liveReplicaNodes can include READ_ONLY_SHARED replicas which are
-            // not included in the numReplicas.liveReplicas() count
-            assert liveReplicaNodes.size() >= numReplicas.liveReplicas();
-
-            // do not schedule more if enough replicas is already pending
-            numEffectiveReplicas = numReplicas.liveReplicas() +
-                                    pendingReplications.getNumReplicas(block);
-
-            if (numEffectiveReplicas >= requiredReplication) {
-              if ( (pendingReplications.getNumReplicas(block) > 0) ||
-                   (blockHasEnoughRacks(block, requiredReplication)) ) {
-                neededReplications.remove(block, priority); // remove from neededReplications
-                blockLog.debug("BLOCK* Removing {} from neededReplications as" +
-                        " it has enough replicas", block);
-                continue;
-              }
-            }
-
-            if (numReplicas.liveReplicas() < requiredReplication) {
-              additionalReplRequired = requiredReplication
-                  - numEffectiveReplicas;
-            } else {
-              additionalReplRequired = 1; // Needed on a new rack
-            }
-            if (block.isStriped()) {
-              short[] indices = new short[liveBlockIndices.size()];
-              for (int i = 0 ; i < liveBlockIndices.size(); i++) {
-                indices[i] = liveBlockIndices.get(i);
-              }
-              ErasureCodingWork ecw = new ErasureCodingWork(block, bc, srcNodes,
-                  containingNodes, liveReplicaNodes, additionalReplRequired,
-                  priority, indices);
-              recovWork.add(ecw);
-            } else {
-              recovWork.add(new ReplicationWork(block, bc, srcNodes,
-                  containingNodes, liveReplicaNodes, additionalReplRequired,
-                  priority));
+            BlockRecoveryWork rw = scheduleRecovery(block, priority);
+            if (rw != null) {
+              recovWork.add(rw);
             }
           }
         }
@@ -1521,12 +1434,12 @@ public class BlockManager implements BlockStatsMXBean {
     }
 
     // Step 2: choose target nodes for each recovery task
-    final Set<Node> excludedNodes = new HashSet<Node>();
+    final Set<Node> excludedNodes = new HashSet<>();
     for(BlockRecoveryWork rw : recovWork){
       // Exclude all of the containing nodes from being targets.
       // This list includes decommissioning or corrupt nodes.
       excludedNodes.clear();
-      for (DatanodeDescriptor dn : rw.containingNodes) {
+      for (DatanodeDescriptor dn : rw.getContainingNodes()) {
         excludedNodes.add(dn);
       }
 
@@ -1534,7 +1447,7 @@ public class BlockManager implements BlockStatsMXBean {
       // It is costly to extract the filename for which chooseTargets is called,
       // so for now we pass in the block collection itself.
       final BlockPlacementPolicy placementPolicy =
-          placementPolicies.getPolicy(rw.block.isStriped());
+          placementPolicies.getPolicy(rw.getBlock().isStriped());
       rw.chooseTargets(placementPolicy, storagePolicySuite, excludedNodes);
     }
 
@@ -1542,92 +1455,15 @@ public class BlockManager implements BlockStatsMXBean {
     namesystem.writeLock();
     try {
       for(BlockRecoveryWork rw : recovWork){
-        final DatanodeStorageInfo[] targets = rw.targets;
+        final DatanodeStorageInfo[] targets = rw.getTargets();
         if(targets == null || targets.length == 0){
-          rw.targets = null;
+          rw.resetTargets();
           continue;
         }
 
         synchronized (neededReplications) {
-          BlockInfo block = rw.block;
-          int priority = rw.priority;
-          // Recheck since global lock was released
-          // block should belong to a file
-          bc = getBlockCollection(block);
-          // abandoned block or block reopened for append
-          if(bc == null || (bc.isUnderConstruction() && block.equals(bc.getLastBlock()))) {
-            neededReplications.remove(block, priority); // remove from neededReplications
-            rw.targets = null;
-            continue;
-          }
-          requiredReplication = getExpectedReplicaNum(block);
-
-          // do not schedule more if enough replicas is already pending
-          NumberReplicas numReplicas = countNodes(block);
-          numEffectiveReplicas = numReplicas.liveReplicas() +
-            pendingReplications.getNumReplicas(block);
-
-          if (numEffectiveReplicas >= requiredReplication) {
-            if ( (pendingReplications.getNumReplicas(block) > 0) ||
-                 (blockHasEnoughRacks(block, requiredReplication)) ) {
-              neededReplications.remove(block, priority); // remove from neededReplications
-              rw.targets = null;
-              blockLog.debug("BLOCK* Removing {} from neededReplications as" +
-                      " it has enough replicas", block);
-              continue;
-            }
-          }
-
-          if ( (numReplicas.liveReplicas() >= requiredReplication) &&
-               (!blockHasEnoughRacks(block, requiredReplication)) ) {
-            if (rw.srcNodes[0].getNetworkLocation().equals(
-                targets[0].getDatanodeDescriptor().getNetworkLocation())) {
-              //No use continuing, unless a new rack in this case
-              continue;
-            }
-          }
-
-          // Add block to the to be replicated list
-          if (block.isStriped()) {
-            assert rw instanceof ErasureCodingWork;
-            assert rw.targets.length > 0;
-            String src = getBlockCollection(block).getName();
-            ErasureCodingZone ecZone = null;
-            try {
-              ecZone = namesystem.getErasureCodingZoneForPath(src);
-            } catch (IOException e) {
-              blockLog
-                  .warn("Failed to get the EC zone for the file {} ", src);
-            }
-            if (ecZone == null) {
-              blockLog.warn("No erasure coding policy found for the file {}. "
-                  + "So cannot proceed for recovery", src);
-              // TODO: we may have to revisit later for what we can do better to
-              // handle this case.
-              continue;
-            }
-            rw.targets[0].getDatanodeDescriptor().addBlockToBeErasureCoded(
-                new ExtendedBlock(namesystem.getBlockPoolId(), block),
-                rw.srcNodes, rw.targets,
-                ((ErasureCodingWork) rw).liveBlockIndicies,
-                ecZone.getErasureCodingPolicy());
-          } else {
-            rw.srcNodes[0].addBlockToBeReplicated(block, targets);
-          }
-          scheduledWork++;
-          DatanodeStorageInfo.incrementBlocksScheduled(targets);
-
-          // Move the block-replication into a "pending" state.
-          // The reason we use 'pending' is so we can retry
-          // replications that fail after an appropriate amount of time.
-          pendingReplications.increment(block,
-              DatanodeStorageInfo.toDatanodeDescriptors(targets));
-          blockLog.debug("BLOCK* block {} is moved from neededReplications to "
-                  + "pendingReplications", block);
-
-          // remove from neededReplications
-          if(numEffectiveReplicas + targets.length >= requiredReplication) {
-            neededReplications.remove(block, priority); // remove from neededReplications
+          if (validateRecoveryWork(rw)) {
+            scheduledWork++;
           }
         }
       }
@@ -1638,15 +1474,15 @@ public class BlockManager implements BlockStatsMXBean {
     if (blockLog.isInfoEnabled()) {
       // log which blocks have been scheduled for replication
       for(BlockRecoveryWork rw : recovWork){
-        DatanodeStorageInfo[] targets = rw.targets;
+        DatanodeStorageInfo[] targets = rw.getTargets();
         if (targets != null && targets.length != 0) {
           StringBuilder targetList = new StringBuilder("datanode(s)");
           for (DatanodeStorageInfo target : targets) {
             targetList.append(' ');
             targetList.append(target.getDatanodeDescriptor());
           }
-          blockLog.debug("BLOCK* ask {} to replicate {} to {}", rw.srcNodes,
-              rw.block, targetList);
+          blockLog.debug("BLOCK* ask {} to replicate {} to {}", rw.getSrcNodes(),
+              rw.getBlock(), targetList);
         }
       }
     }
@@ -1658,6 +1494,160 @@ public class BlockManager implements BlockStatsMXBean {
     return scheduledWork;
   }
 
+  boolean hasEnoughEffectiveReplicas(BlockInfo block,
+      NumberReplicas numReplicas, int pendingReplicaNum, int required) {
+    int numEffectiveReplicas = numReplicas.liveReplicas() + pendingReplicaNum;
+    return (numEffectiveReplicas >= required) &&
+        (pendingReplicaNum > 0 || blockHasEnoughRacks(block, required));
+  }
+
+  private BlockRecoveryWork scheduleRecovery(BlockInfo block, int priority) {
+    // block should belong to a file
+    BlockCollection bc = getBlockCollection(block);
+    // abandoned block or block reopened for append
+    if (bc == null
+        || (bc.isUnderConstruction() && block.equals(bc.getLastBlock()))) {
+      // remove from neededReplications
+      neededReplications.remove(block, priority);
+      return null;
+    }
+
+    short requiredReplication = getExpectedReplicaNum(block);
+
+    // get a source data-node
+    List<DatanodeDescriptor> containingNodes = new ArrayList<>();
+    List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<>();
+    NumberReplicas numReplicas = new NumberReplicas();
+    List<Short> liveBlockIndices = new ArrayList<>();
+    final DatanodeDescriptor[] srcNodes = chooseSourceDatanodes(block,
+        containingNodes, liveReplicaNodes, numReplicas,
+        liveBlockIndices, priority);
+    if(srcNodes == null || srcNodes.length == 0) {
+      // block can not be recovered from any node
+      LOG.debug("Block " + block + " cannot be recovered " +
+          "from any node");
+      return null;
+    }
+
+    // liveReplicaNodes can include READ_ONLY_SHARED replicas which are
+    // not included in the numReplicas.liveReplicas() count
+    assert liveReplicaNodes.size() >= numReplicas.liveReplicas();
+
+    int pendingNum = pendingReplications.getNumReplicas(block);
+    if (hasEnoughEffectiveReplicas(block, numReplicas, pendingNum,
+        requiredReplication)) {
+      neededReplications.remove(block, priority);
+      blockLog.debug("BLOCK* Removing {} from neededReplications as" +
+          " it has enough replicas", block);
+      return null;
+    }
+
+    final int additionalReplRequired;
+    if (numReplicas.liveReplicas() < requiredReplication) {
+      additionalReplRequired = requiredReplication - numReplicas.liveReplicas()
+          - pendingNum;
+    } else {
+      additionalReplRequired = 1; // Needed on a new rack
+    }
+
+    if (block.isStriped()) {
+      short[] indices = new short[liveBlockIndices.size()];
+      for (int i = 0 ; i < liveBlockIndices.size(); i++) {
+        indices[i] = liveBlockIndices.get(i);
+      }
+      return new ErasureCodingWork(block, bc, srcNodes,
+          containingNodes, liveReplicaNodes, additionalReplRequired,
+          priority, indices);
+    } else {
+      return new ReplicationWork(block, bc, srcNodes,
+          containingNodes, liveReplicaNodes, additionalReplRequired,
+          priority);
+    }
+  }
+
+  private boolean validateRecoveryWork(BlockRecoveryWork rw) {
+    BlockInfo block = rw.getBlock();
+    int priority = rw.getPriority();
+    // Recheck since global lock was released
+    // block should belong to a file
+    BlockCollection bc = getBlockCollection(block);
+    // abandoned block or block reopened for append
+    if (bc == null
+        || (bc.isUnderConstruction() && block.equals(bc.getLastBlock()))) {
+      neededReplications.remove(block, priority);
+      rw.resetTargets();
+      return false;
+    }
+
+    // do not schedule more if enough replicas is already pending
+    final short requiredReplication = getExpectedReplicaNum(block);
+    NumberReplicas numReplicas = countNodes(block);
+    final int pendingNum = pendingReplications.getNumReplicas(block);
+    if (hasEnoughEffectiveReplicas(block, numReplicas, pendingNum,
+        requiredReplication)) {
+      neededReplications.remove(block, priority);
+      rw.resetTargets();
+      blockLog.debug("BLOCK* Removing {} from neededReplications as" +
+          " it has enough replicas", block);
+      return false;
+    }
+
+    DatanodeStorageInfo[] targets = rw.getTargets();
+    if ( (numReplicas.liveReplicas() >= requiredReplication) &&
+        (!blockHasEnoughRacks(block, requiredReplication)) ) {
+      if (rw.getSrcNodes()[0].getNetworkLocation().equals(
+          targets[0].getDatanodeDescriptor().getNetworkLocation())) {
+        //No use continuing, unless a new rack in this case
+        return false;
+      }
+    }
+
+    // Add block to the to be recovered list
+    if (block.isStriped()) {
+      assert rw instanceof ErasureCodingWork;
+      assert rw.getTargets().length > 0;
+      String src = getBlockCollection(block).getName();
+      ErasureCodingZone ecZone = null;
+      try {
+        ecZone = namesystem.getErasureCodingZoneForPath(src);
+      } catch (IOException e) {
+        blockLog
+            .warn("Failed to get the EC zone for the file {} ", src);
+      }
+      if (ecZone == null) {
+        blockLog.warn("No erasure coding policy found for the file {}. "
+            + "So cannot proceed for recovery", src);
+        // TODO: we may have to revisit later for what we can do better to
+        // handle this case.
+        return false;
+      }
+      rw.getTargets()[0].getDatanodeDescriptor().addBlockToBeErasureCoded(
+          new ExtendedBlock(namesystem.getBlockPoolId(), block),
+          rw.getSrcNodes(), rw.getTargets(),
+          ((ErasureCodingWork) rw).getLiveBlockIndicies(),
+          ecZone.getErasureCodingPolicy());
+    } else {
+      rw.getSrcNodes()[0].addBlockToBeReplicated(block, targets);
+    }
+
+    DatanodeStorageInfo.incrementBlocksScheduled(targets);
+
+    // Move the block-replication into a "pending" state.
+    // The reason we use 'pending' is so we can retry
+    // replications that fail after an appropriate amount of time.
+    pendingReplications.increment(block,
+        DatanodeStorageInfo.toDatanodeDescriptors(targets));
+    blockLog.debug("BLOCK* block {} is moved from neededReplications to "
+        + "pendingReplications", block);
+
+    int numEffectiveReplicas = numReplicas.liveReplicas() + pendingNum;
+    // remove from neededReplications
+    if(numEffectiveReplicas + targets.length >= requiredReplication) {
+      neededReplications.remove(block, priority);
+    }
+    return true;
+  }
+
   /** Choose target for WebHDFS redirection. */
   public DatanodeStorageInfo[] chooseTarget4WebHDFS(String src,
       DatanodeDescriptor clientnode, Set<Node> excludes, long blocksize) {
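hasEnoughEffectiveReplicas() above is the predicate both scheduleRecovery() and validateRecoveryWork() use to drop a block from neededReplications. A standalone restatement with plain ints, just to make the arithmetic concrete (the names here are illustrative, not the NameNode API):

    // effective = live + pending; nothing more is scheduled if that already meets
    // the target and either transfers are still pending or the racks check passes.
    static boolean hasEnoughEffectiveReplicas(int liveReplicas, int pendingReplicas,
        int required, boolean hasEnoughRacks) {
      int effective = liveReplicas + pendingReplicas;
      return effective >= required && (pendingReplicas > 0 || hasEnoughRacks);
    }

    // e.g. required = 3: live = 2, pending = 1            -> true  (recovery already in flight)
    //                    live = 3, pending = 0, one rack  -> false (a copy on a new rack is still wanted)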
@@ -1926,48 +1916,6 @@ public class BlockManager implements BlockStatsMXBean {
     }
   }
 
-  /**
-   * BlockToMarkCorrupt is used to build the "toCorrupt" list, which is a
-   * list of blocks that should be considered corrupt due to a block report.
-   */
-  private static class BlockToMarkCorrupt {
-    /**
-     * The corrupted block in a datanode. This is the one reported by the
-     * datanode.
-     */
-    final Block corrupted;
-    /** The corresponding block stored in the BlockManager. */
-    final BlockInfo stored;
-    /** The reason to mark corrupt. */
-    final String reason;
-    /** The reason code to be stored */
-    final Reason reasonCode;
-
-    BlockToMarkCorrupt(Block corrupted, BlockInfo stored, String reason,
-        Reason reasonCode) {
-      Preconditions.checkNotNull(corrupted, "corrupted is null");
-      Preconditions.checkNotNull(stored, "stored is null");
-
-      this.corrupted = corrupted;
-      this.stored = stored;
-      this.reason = reason;
-      this.reasonCode = reasonCode;
-    }
-
-    BlockToMarkCorrupt(Block corrupted, BlockInfo stored, long gs,
-        String reason, Reason reasonCode) {
-      this(corrupted, stored, reason, reasonCode);
-      //the corrupted block in datanode has a different generation stamp
-      corrupted.setGenerationStamp(gs);
-    }
-
-    @Override
-    public String toString() {
-      return corrupted + "("
-          + (corrupted == stored? "same as stored": "stored=" + stored) + ")";
-    }
-  }
-
  /**
   * The given storage is reporting all its blocks.
   * Update the (storage-->block list) and (block-->storage list) maps.
@@ -2722,7 +2670,7 @@ public class BlockManager implements BlockStatsMXBean {
    int numCurrentReplica = countLiveNodes(storedBlock);
    if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
        && hasMinStorage(storedBlock, numCurrentReplica)) {
-      completeBlock(getBlockCollection(storedBlock), storedBlock, false);
+      completeBlock(storedBlock, false);
    } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
      // check whether safe replication is reached for the block
      // only complete blocks are counted towards that.
@@ -2797,7 +2745,7 @@ public class BlockManager implements BlockStatsMXBean {

    if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
        hasMinStorage(storedBlock, numLiveReplicas)) {
-      storedBlock = completeBlock(bc, storedBlock, false);
+      completeBlock(storedBlock, false);
    } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
      // check whether safe replication is reached for the block
      // only complete blocks are counted towards that
@@ -4196,112 +4144,6 @@ public class BlockManager implements BlockStatsMXBean {
    return lb;
  }

-  /**
-   * This class is used internally by {@link this#computeRecoveryWorkForBlocks}
-   * to represent a task to recover a block through replication or erasure
-   * coding. Recovery is done by transferring data from srcNodes to targets
-   */
-  private abstract static class BlockRecoveryWork {
-    final BlockInfo block;
-    final BlockCollection bc;
-
-    /**
-     * An erasure coding recovery task has multiple source nodes.
-     * A replication task only has 1 source node, stored on top of the array
-     */
-    final DatanodeDescriptor[] srcNodes;
-    /** Nodes containing the block; avoid them in choosing new targets */
-    final List<DatanodeDescriptor> containingNodes;
-    /** Required by {@link BlockPlacementPolicy#chooseTarget} */
-    final List<DatanodeStorageInfo> liveReplicaStorages;
-    final int additionalReplRequired;
-
-    DatanodeStorageInfo[] targets;
-    final int priority;
-
-    BlockRecoveryWork(BlockInfo block,
-        BlockCollection bc,
-        DatanodeDescriptor[] srcNodes,
-        List<DatanodeDescriptor> containingNodes,
-        List<DatanodeStorageInfo> liveReplicaStorages,
-        int additionalReplRequired,
-        int priority) {
-      this.block = block;
-      this.bc = bc;
-      this.srcNodes = srcNodes;
-      this.containingNodes = containingNodes;
-      this.liveReplicaStorages = liveReplicaStorages;
-      this.additionalReplRequired = additionalReplRequired;
-      this.priority = priority;
-      this.targets = null;
-    }
-
-    abstract void chooseTargets(BlockPlacementPolicy blockplacement,
-        BlockStoragePolicySuite storagePolicySuite,
-        Set<Node> excludedNodes);
-  }
-
-  private static class ReplicationWork extends BlockRecoveryWork {
-    ReplicationWork(BlockInfo block,
-        BlockCollection bc,
-        DatanodeDescriptor[] srcNodes,
-        List<DatanodeDescriptor> containingNodes,
-        List<DatanodeStorageInfo> liveReplicaStorages,
-        int additionalReplRequired,
-        int priority) {
-      super(block, bc, srcNodes, containingNodes,
-          liveReplicaStorages, additionalReplRequired, priority);
-      LOG.debug("Creating a ReplicationWork to recover " + block);
-    }
-
-    @Override
-    void chooseTargets(BlockPlacementPolicy blockplacement,
-        BlockStoragePolicySuite storagePolicySuite,
-        Set<Node> excludedNodes) {
-      assert srcNodes.length > 0
-          : "At least 1 source node should have been selected";
-      try {
-        targets = blockplacement.chooseTarget(bc.getName(),
-            additionalReplRequired, srcNodes[0], liveReplicaStorages, false,
-            excludedNodes, block.getNumBytes(),
-            storagePolicySuite.getPolicy(bc.getStoragePolicyID()));
-      } finally {
-        srcNodes[0].decrementPendingReplicationWithoutTargets();
-      }
-    }
-  }
-
-  private static class ErasureCodingWork extends BlockRecoveryWork {
-    final short[] liveBlockIndicies;
-
-    ErasureCodingWork(BlockInfo block,
-        BlockCollection bc,
-        DatanodeDescriptor[] srcNodes,
-        List<DatanodeDescriptor> containingNodes,
-        List<DatanodeStorageInfo> liveReplicaStorages,
-        int additionalReplRequired,
-        int priority, short[] liveBlockIndicies) {
-      super(block, bc, srcNodes, containingNodes,
-          liveReplicaStorages, additionalReplRequired, priority);
-      this.liveBlockIndicies = liveBlockIndicies;
-      LOG.debug("Creating an ErasureCodingWork to recover " + block);
-    }
-
-    @Override
-    void chooseTargets(BlockPlacementPolicy blockplacement,
-        BlockStoragePolicySuite storagePolicySuite,
-        Set<Node> excludedNodes) {
-      try {
-        // TODO: new placement policy for EC considering multiple writers
-        targets = blockplacement.chooseTarget(bc.getName(),
-            additionalReplRequired, srcNodes[0], liveReplicaStorages, false,
-            excludedNodes, block.getNumBytes(),
-            storagePolicySuite.getPolicy(bc.getStoragePolicyID()));
-      } finally {
-      }
-    }
-  }
-
  /**
   * A simple result enum for the result of
   * {@link BlockManager#processMisReplicatedBlock(BlockInfo)}.
@@ -4315,9 +4157,9 @@ public class BlockManager implements BlockStatsMXBean {
    OVER_REPLICATED,
    /** A decision can't currently be made about this block. */
    POSTPONE,
-    /** The block is under construction, so should be ignored */
+    /** The block is under construction, so should be ignored. */
    UNDER_CONSTRUCTION,
-    /** The block is properly replicated */
+    /** The block is properly replicated. */
    OK
  }


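The first hunk in the BlockManager.java changes above decides whether a queued recovery task is still worth dispatching: a task is dropped when live plus pending replicas already satisfy the replication factor, skipped when only rack diversity is missing and the chosen target adds no new rack, and otherwise handed to a datanode and tracked as pending so it can be retried. The following self-contained sketch mirrors that decision with plain values; the names RecoverySchedulingSketch, Decision and decide are illustrative and are not part of BlockManager.

// Illustrative only: a simplified mirror of the checks performed before
// scheduling recovery work in the hunk above. Not an HDFS class.
final class RecoverySchedulingSketch {
  enum Decision { DROP_ALREADY_SATISFIED, SKIP_NO_NEW_RACK, SCHEDULE }

  static Decision decide(int liveReplicas, int pendingReplicas,
      int requiredReplication, boolean hasEnoughRacks,
      boolean targetOnSameRackAsSource) {
    // Enough effective replicas (live + already pending): drop the task.
    if (liveReplicas + pendingReplicas >= requiredReplication) {
      return Decision.DROP_ALREADY_SATISFIED;
    }
    // Replica count is fine but rack diversity is not: only a target on a
    // new rack helps, so skip if the target shares the source's rack.
    if (liveReplicas >= requiredReplication && !hasEnoughRacks
        && targetOnSameRackAsSource) {
      return Decision.SKIP_NO_NEW_RACK;
    }
    // Otherwise hand the work to a datanode and track it as pending so it
    // can be retried if the transfer does not complete in time.
    return Decision.SCHEDULE;
  }

  public static void main(String[] args) {
    System.out.println(decide(3, 0, 3, false, true)); // SKIP_NO_NEW_RACK
    System.out.println(decide(1, 2, 3, true, false)); // DROP_ALREADY_SATISFIED
    System.out.println(decide(1, 0, 3, true, false)); // SCHEDULE
  }
}
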
+ 41 - 106
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -26,12 +26,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
@@ -458,19 +455,18 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
        for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
            .entrySet().iterator(); iter.hasNext(); ) {
          Map.Entry<StorageType, Integer> entry = iter.next();
-          for (DatanodeStorageInfo localStorage : DFSUtil.shuffle(
-              localDatanode.getStorageInfos())) {
-            StorageType type = entry.getKey();
-            if (addIfIsGoodTarget(localStorage, excludedNodes, blocksize,
-                results, type) >= 0) {
-              int num = entry.getValue();
-              if (num == 1) {
-                iter.remove();
-              } else {
-                entry.setValue(num - 1);
-              }
-              return localStorage;
+          DatanodeStorageInfo localStorage = chooseStorage4Block(
+              localDatanode, blocksize, results, entry.getKey());
+          if (localStorage != null) {
+            // add node and related nodes to excludedNode
+            addToExcludedNodes(localDatanode, excludedNodes);
+            int num = entry.getValue();
+            if (num == 1) {
+              iter.remove();
+            } else {
+              entry.setValue(num - 1);
            }
+            return localStorage;
          }
        }
      }
@@ -651,7 +647,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                            boolean avoidStaleNodes,
                            EnumMap<StorageType, Integer> storageTypes)
                            throws NotEnoughReplicasException {
-      
+
    int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
        scope, excludedNodes);
    StringBuilder builder = null;
@@ -669,49 +665,39 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
           builder.append("\nNode ").append(NodeBase.getPath(chosenNode)).append(" [");
           builder.append("\nNode ").append(NodeBase.getPath(chosenNode)).append(" [");
         }
         }
         numOfAvailableNodes--;
         numOfAvailableNodes--;
-        if (!isGoodDatanode(chosenNode, maxNodesPerRack, considerLoad,
+        DatanodeStorageInfo storage = null;
+        if (isGoodDatanode(chosenNode, maxNodesPerRack, considerLoad,
            results, avoidStaleNodes)) {
-          if (LOG.isDebugEnabled()) {
-            builder.append("\n]");
-          }
-          badTarget = true;
-          continue;
-        }
-
-        final DatanodeStorageInfo[] storages = DFSUtil.shuffle(
-            chosenNode.getStorageInfos());
-        int i = 0;
-        boolean search = true;
-        for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
-            .entrySet().iterator(); search && iter.hasNext(); ) {
-          Map.Entry<StorageType, Integer> entry = iter.next();
-          for (i = 0; i < storages.length; i++) {
-            StorageType type = entry.getKey();
-            final int newExcludedNodes = addIfIsGoodTarget(storages[i],
-                excludedNodes, blocksize, results, type);
-            if (newExcludedNodes >= 0) {
+          for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
+              .entrySet().iterator(); iter.hasNext(); ) {
+            Map.Entry<StorageType, Integer> entry = iter.next();
+            storage = chooseStorage4Block(
+                chosenNode, blocksize, results, entry.getKey());
+            if (storage != null) {
              numOfReplicas--;
              if (firstChosen == null) {
-                firstChosen = storages[i];
+                firstChosen = storage;
              }
-              numOfAvailableNodes -= newExcludedNodes;
+              // add node and related nodes to excludedNode
+              numOfAvailableNodes -=
+                  addToExcludedNodes(chosenNode, excludedNodes);
              int num = entry.getValue();
              if (num == 1) {
                iter.remove();
              } else {
                entry.setValue(num - 1);
              }
-              search = false;
              break;
            }
          }
        }
+
        if (LOG.isDebugEnabled()) {
          builder.append("\n]");
        }

        // If no candidate storage was found on this DN then set badTarget.
-        badTarget = (i == storages.length);
+        badTarget = (storage == null);
      }
    }

@@ -740,32 +726,27 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
  }

  /**
-   * If the given storage is a good target, add it to the result list and
-   * update the set of excluded nodes.
-   * @return -1 if the given is not a good target;
-   *         otherwise, return the number of nodes added to excludedNodes set.
+   * Choose a good storage of given storage type from datanode, and add it to
+   * the result list.
+   *
+   * @param dnd datanode descriptor
+   * @param blockSize requested block size
+   * @param results the result storages
+   * @param storageType requested storage type
+   * @return the chosen datanode storage
   */
-  int addIfIsGoodTarget(DatanodeStorageInfo storage,
-      Set<Node> excludedNodes,
+  DatanodeStorageInfo chooseStorage4Block(DatanodeDescriptor dnd,
      long blockSize,
      List<DatanodeStorageInfo> results,
      StorageType storageType) {
-    if (isGoodTarget(storage, blockSize, results, storageType)) {
+    DatanodeStorageInfo storage =
+        dnd.chooseStorage4Block(storageType, blockSize);
+    if (storage != null) {
      results.add(storage);
-      // add node and related nodes to excludedNode
-      return addToExcludedNodes(storage.getDatanodeDescriptor(), excludedNodes);
-    } else { 
-      return -1;
-    }
-  }
-
-  private static void logNodeIsNotChosen(DatanodeStorageInfo storage, String reason) {
-    if (LOG.isDebugEnabled()) {
-      // build the error message for later use.
-      debugLoggingBuilder.get()
-          .append("\n  Storage ").append(storage)
-          .append(" is not chosen since ").append(reason).append(".");
+    } else {
+      logNodeIsNotChosen(dnd, "no good storage to place the block ");
    }
+    return storage;
  }

  private static void logNodeIsNotChosen(DatanodeDescriptor node,
@@ -836,52 +817,6 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    return true;
  }

-  /**
-   * Determine if a storage is a good target.
-   *
-   * @param storage The target storage
-   * @param blockSize Size of block
-   * @param results A list containing currently chosen nodes. Used to check if
-   *                too many nodes has been chosen in the target rack.
-   * @return Return true if <i>node</i> has enough space.
-   */
-  private boolean isGoodTarget(DatanodeStorageInfo storage,
-                               long blockSize,
-                               List<DatanodeStorageInfo> results,
-                               StorageType requiredStorageType) {
-    if (storage.getStorageType() != requiredStorageType) {
-      logNodeIsNotChosen(storage, "storage types do not match,"
-          + " where the required storage type is " + requiredStorageType);
-      return false;
-    }
-    if (storage.getState() == State.READ_ONLY_SHARED) {
-      logNodeIsNotChosen(storage, "storage is read-only");
-      return false;
-    }
-
-    if (storage.getState() == State.FAILED) {
-      logNodeIsNotChosen(storage, "storage has failed");
-      return false;
-    }
-
-    DatanodeDescriptor node = storage.getDatanodeDescriptor();
-
-    final long requiredSize = blockSize * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE;
-    final long scheduledSize = blockSize * node.getBlocksScheduled(storage.getStorageType());
-    final long remaining = node.getRemaining(storage.getStorageType(),
-        requiredSize);
-    if (requiredSize > remaining - scheduledSize) {
-      logNodeIsNotChosen(storage, "the node does not have enough "
-          + storage.getStorageType() + " space"
-          + " (required=" + requiredSize
-          + ", scheduled=" + scheduledSize
-          + ", remaining=" + remaining + ")");
-      return false;
-    }
-
-    return true;
-  }
-
  /**
   * Return a pipeline of nodes.
   * The pipeline is formed finding a shortest path that

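In the BlockPlacementPolicyDefault changes above, both the local-node path and the random-node loop now walk a map of (storage type -> replicas still needed), ask the candidate node for one storage of that type via chooseStorage4Block, and decrement or remove the map entry on success. The stand-alone sketch below reproduces that bookkeeping; the StorageKind enum, StoragePicker interface and chooseOne method are hypothetical stand-ins, not the HDFS types.

// Illustrative only: the storage-type countdown used when choosing targets.
import java.util.EnumMap;
import java.util.Iterator;
import java.util.Map;

public class StorageCountdownSketch {
  enum StorageKind { DISK, SSD, ARCHIVE }

  interface StoragePicker {
    /** Returns a storage id on the node for the given kind, or null. */
    String pick(StorageKind kind);
  }

  static String chooseOne(EnumMap<StorageKind, Integer> remaining,
      StoragePicker node) {
    for (Iterator<Map.Entry<StorageKind, Integer>> it =
        remaining.entrySet().iterator(); it.hasNext(); ) {
      Map.Entry<StorageKind, Integer> entry = it.next();
      String storage = node.pick(entry.getKey());
      if (storage != null) {
        int left = entry.getValue();
        if (left == 1) {
          it.remove();            // this storage type is fully satisfied
        } else {
          entry.setValue(left - 1);
        }
        return storage;           // stop at the first type the node can serve
      }
    }
    return null;                  // no storage on this node fits any need
  }
}
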
+ 111 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockRecoveryWork.java

@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.net.Node;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * This class is used internally by
+ * {@link BlockManager#computeRecoveryWorkForBlocks} to represent a task to
+ * recover a block through replication or erasure coding. Recovery is done by
+ * transferring data from srcNodes to targets
+ */
+abstract class BlockRecoveryWork {
+  private final BlockInfo block;
+
+  private final BlockCollection bc;
+
+  /**
+   * An erasure coding recovery task has multiple source nodes.
+   * A replication task only has 1 source node, stored on top of the array
+   */
+  private final DatanodeDescriptor[] srcNodes;
+  /** Nodes containing the block; avoid them in choosing new targets */
+  private final List<DatanodeDescriptor> containingNodes;
+  /** Required by {@link BlockPlacementPolicy#chooseTarget} */
+  private  final List<DatanodeStorageInfo> liveReplicaStorages;
+  private final int additionalReplRequired;
+
+  private DatanodeStorageInfo[] targets;
+  private final int priority;
+
+  public BlockRecoveryWork(BlockInfo block,
+      BlockCollection bc,
+      DatanodeDescriptor[] srcNodes,
+      List<DatanodeDescriptor> containingNodes,
+      List<DatanodeStorageInfo> liveReplicaStorages,
+      int additionalReplRequired,
+      int priority) {
+    this.block = block;
+    this.bc = bc;
+    this.srcNodes = srcNodes;
+    this.containingNodes = containingNodes;
+    this.liveReplicaStorages = liveReplicaStorages;
+    this.additionalReplRequired = additionalReplRequired;
+    this.priority = priority;
+    this.targets = null;
+  }
+
+  DatanodeStorageInfo[] getTargets() {
+    return targets;
+  }
+
+  void resetTargets() {
+    this.targets = null;
+  }
+
+  void setTargets(DatanodeStorageInfo[] targets) {
+    this.targets = targets;
+  }
+
+  List<DatanodeDescriptor> getContainingNodes() {
+    return Collections.unmodifiableList(containingNodes);
+  }
+
+  public int getPriority() {
+    return priority;
+  }
+
+  public BlockInfo getBlock() {
+    return block;
+  }
+
+  public DatanodeDescriptor[] getSrcNodes() {
+    return srcNodes;
+  }
+
+  BlockCollection getBc() {
+    return bc;
+  }
+
+  List<DatanodeStorageInfo> getLiveReplicaStorages() {
+    return liveReplicaStorages;
+  }
+
+  public int getAdditionalReplRequired() {
+    return additionalReplRequired;
+  }
+
+  abstract void chooseTargets(BlockPlacementPolicy blockplacement,
+      BlockStoragePolicySuite storagePolicySuite,
+      Set<Node> excludedNodes);
+}

+ 82 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockToMarkCorrupt.java

@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import static org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.protocol.Block;
+
+/**
+ * BlockToMarkCorrupt is used to build the "toCorrupt" list, which is a
+ * list of blocks that should be considered corrupt due to a block report.
+ */
+class BlockToMarkCorrupt {
+  /** The corrupted block in a datanode. */
+  private final Block corrupted;
+  /** The corresponding block stored in the BlockManager. */
+  private final BlockInfo stored;
+  /** The reason to mark corrupt. */
+  private final String reason;
+  /** The reason code to be stored */
+  private final CorruptReplicasMap.Reason reasonCode;
+
+  BlockToMarkCorrupt(Block corrupted, BlockInfo stored, String reason,
+      CorruptReplicasMap.Reason reasonCode) {
+    Preconditions.checkNotNull(corrupted, "corrupted is null");
+    Preconditions.checkNotNull(stored, "stored is null");
+
+    this.corrupted = corrupted;
+    this.stored = stored;
+    this.reason = reason;
+    this.reasonCode = reasonCode;
+  }
+
+  BlockToMarkCorrupt(Block corrupted, BlockInfo stored, long gs, String reason,
+      CorruptReplicasMap.Reason reasonCode) {
+    this(corrupted, stored, reason, reasonCode);
+    //the corrupted block in datanode has a different generation stamp
+    this.corrupted.setGenerationStamp(gs);
+  }
+
+  public boolean isCorruptedDuringWrite() {
+    return stored.getGenerationStamp() > corrupted.getGenerationStamp();
+  }
+
+  public Block getCorrupted() {
+    return corrupted;
+  }
+
+  public BlockInfo getStored() {
+    return stored;
+  }
+
+  public String getReason() {
+    return reason;
+  }
+
+  public Reason getReasonCode() {
+    return reasonCode;
+  }
+
+  @Override
+  public String toString() {
+    return corrupted + "("
+        + (corrupted == stored ? "same as stored": "stored=" + stored) + ")";
+  }
+}

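The new isCorruptedDuringWrite() accessor above compares generation stamps: a replica whose reported stamp is older than the stamp the NameNode has recorded was left behind by an append or pipeline recovery while it was being written, which callers treat differently from ordinary on-disk corruption. A minimal stand-in using bare longs, with hypothetical names:

// Illustrative only: the generation-stamp comparison behind
// isCorruptedDuringWrite(), not the HDFS class itself.
public class GenStampSketch {
  static boolean corruptedDuringWrite(long storedGenStamp,
      long reportedGenStamp) {
    return storedGenStamp > reportedGenStamp;
  }

  public static void main(String[] args) {
    System.out.println(corruptedDuringWrite(1005L, 1003L)); // true: stale replica
    System.out.println(corruptedDuringWrite(1005L, 1005L)); // false: other corruption
  }
}
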
+ 0 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java

@@ -220,20 +220,4 @@ class BlocksMap {
  int getCapacity() {
    return capacity;
  }
-
-  /**
-   * Replace a block in the block map by a new block.
-   * The new block and the old one have the same key.
-   * @param newBlock - block for replacement
-   * @return new block
-   */
-  BlockInfo replaceBlock(BlockInfo newBlock) {
-    BlockInfo currentBlock = blocks.get(newBlock);
-    assert currentBlock != null : "the block if not in blocksMap";
-    // replace block in data-node lists
-    currentBlock.replaceBlock(newBlock);
-    // replace block in the map itself
-    blocks.put(newBlock);
-    return newBlock;
-  }
 }

+ 25 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -32,6 +32,7 @@ import java.util.Set;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.StorageType;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -704,26 +706,39 @@ public class DatanodeDescriptor extends DatanodeInfo {
  }

  /**
-   * Return the sum of remaining spaces of the specified type. If the remaining
-   * space of a storage is less than minSize, it won't be counted toward the
-   * sum.
+   * Find whether the datanode contains good storage of given type to
+   * place block of size <code>blockSize</code>.
+   *
+   * <p>Currently datanode only cares about the storage type, in this
+   * method, the first storage of given type we see is returned.
   *
-   * @param t The storage type. If null, the type is ignored.
-   * @param minSize The minimum free space required.
-   * @return the sum of remaining spaces that are bigger than minSize.
+   * @param t requested storage type
+   * @param blockSize requested block size
+   * @return
   */
-  public long getRemaining(StorageType t, long minSize) {
+  public DatanodeStorageInfo chooseStorage4Block(StorageType t,
+      long blockSize) {
+    final long requiredSize =
+        blockSize * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE;
+    final long scheduledSize = blockSize * getBlocksScheduled(t);
    long remaining = 0;
+    DatanodeStorageInfo storage = null;
    for (DatanodeStorageInfo s : getStorageInfos()) {
      if (s.getState() == State.NORMAL &&
-          (t == null || s.getStorageType() == t)) {
+          s.getStorageType() == t) {
+        if (storage == null) {
+          storage = s;
+        }
        long r = s.getRemaining();
-        if (r >= minSize) {
+        if (r >= requiredSize) {
          remaining += r;
        }
      }
    }
-    return remaining;
+    if (requiredSize > remaining - scheduledSize) {
+      return null;
+    }
+    return storage;
  }

  /**

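The new DatanodeDescriptor#chooseStorage4Block above folds the old getRemaining() accounting into a single decision: free space is summed only over storages whose remaining room meets the required size, space already promised to scheduled blocks is subtracted, and the first storage of the requested type is returned if the node still has room. The sketch below reproduces that arithmetic over a plain array of per-storage free-space values; the class, method signature and assumed MIN_BLOCKS_FOR_WRITE value are illustrative scaffolding (the real method also filters by storage type and NORMAL state, which is omitted here).

// Illustrative only: the space check behind chooseStorage4Block.
public class ChooseStorageSketch {
  static final int MIN_BLOCKS_FOR_WRITE = 1; // assumed HdfsServerConstants value

  /** Returns the index of the chosen storage, or -1 if the node lacks room. */
  static int chooseStorage(long[] remainingPerStorage, long blockSize,
      int blocksAlreadyScheduled) {
    final long requiredSize = blockSize * MIN_BLOCKS_FOR_WRITE;
    final long scheduledSize = blockSize * blocksAlreadyScheduled;
    long remaining = 0;
    int chosen = -1;
    for (int i = 0; i < remainingPerStorage.length; i++) {
      if (chosen == -1) {
        chosen = i;                             // first matching storage wins
      }
      if (remainingPerStorage[i] >= requiredSize) {
        remaining += remainingPerStorage[i];    // only roomy storages count
      }
    }
    // Space already promised to scheduled blocks is subtracted before deciding.
    return (requiredSize > remaining - scheduledSize) ? -1 : chosen;
  }
}
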
+ 6 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -1280,11 +1280,14 @@ public class DatanodeManager {
      for (DatanodeDescriptor dn : datanodeMap.values()) {
        final boolean isDead = isDatanodeDead(dn);
        final boolean isDecommissioning = dn.isDecommissionInProgress();
-        if ((listLiveNodes && !isDead) ||
+
+        if (((listLiveNodes && !isDead) ||
            (listDeadNodes && isDead) ||
-            (listDecommissioningNodes && isDecommissioning)) {
-            nodes.add(dn);
+            (listDecommissioningNodes && isDecommissioning)) &&
+            hostFileManager.isIncluded(dn)) {
+          nodes.add(dn);
        }
+
        foundNodes.add(HostFileManager.resolvedAddressFromDatanodeID(dn));
      }
    }

+ 60 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java

@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.net.Node;
+
+import java.util.List;
+import java.util.Set;
+
+class ErasureCodingWork extends BlockRecoveryWork {
+  private final short[] liveBlockIndicies;
+
+  public ErasureCodingWork(BlockInfo block,
+      BlockCollection bc,
+      DatanodeDescriptor[] srcNodes,
+      List<DatanodeDescriptor> containingNodes,
+      List<DatanodeStorageInfo> liveReplicaStorages,
+      int additionalReplRequired,
+      int priority, short[] liveBlockIndicies) {
+    super(block, bc, srcNodes, containingNodes,
+        liveReplicaStorages, additionalReplRequired, priority);
+    this.liveBlockIndicies = liveBlockIndicies;
+    BlockManager.LOG.debug("Creating an ErasureCodingWork to recover " + block);
+  }
+
+  short[] getLiveBlockIndicies() {
+    return liveBlockIndicies;
+  }
+
+  @Override
+  void chooseTargets(BlockPlacementPolicy blockplacement,
+      BlockStoragePolicySuite storagePolicySuite,
+      Set<Node> excludedNodes) {
+    try {
+      // TODO: new placement policy for EC considering multiple writers
+      DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
+          getBc().getName(), getAdditionalReplRequired(), getSrcNodes()[0],
+          getLiveReplicaStorages(), false, excludedNodes,
+          getBlock().getNumBytes(),
+          storagePolicySuite.getPolicy(getBc().getStoragePolicyID()));
+      setTargets(chosenTargets);
+    } finally {
+    }
+  }
+}

+ 19 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java

@@ -126,9 +126,28 @@ class HostFileManager {
    return !includes.isEmpty();
  }

+  /**
+   * Read the includes and excludes lists from the named files.  Any previous
+   * includes and excludes lists are discarded.
+   * @param includeFile the path to the new includes list
+   * @param excludeFile the path to the new excludes list
+   * @throws IOException thrown if there is a problem reading one of the files
+   */
  void refresh(String includeFile, String excludeFile) throws IOException {
    HostSet newIncludes = readFile("included", includeFile);
    HostSet newExcludes = readFile("excluded", excludeFile);
+
+    refresh(newIncludes, newExcludes);
+  }
+
+  /**
+   * Set the includes and excludes lists by the new HostSet instances. The
+   * old instances are discarded.
+   * @param newIncludes the new includes list
+   * @param newExcludes the new excludes list
+   */
+  @VisibleForTesting
+  void refresh(HostSet newIncludes, HostSet newExcludes) {
    synchronized (this) {
      includes = newIncludes;
      excludes = newExcludes;

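The new refresh(HostSet, HostSet) overload above lets tests, as well as the file-based refresh, swap both host lists in one step under the same lock, so readers never observe a half-updated pair. The generic sketch below shows that swap-under-lock pattern with plain string sets; HostRegistry and its methods are hypothetical and are not the HDFS HostFileManager or HostSet types.

// Illustrative only: atomic replacement of paired include/exclude lists.
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class HostRegistry {
  private Set<String> includes = Collections.emptySet();
  private Set<String> excludes = Collections.emptySet();

  /** Replace both lists in one step so readers never see a mixed state. */
  synchronized void refresh(Set<String> newIncludes, Set<String> newExcludes) {
    this.includes = new HashSet<>(newIncludes);
    this.excludes = new HashSet<>(newExcludes);
  }

  synchronized boolean isIncluded(String host) {
    // An empty include list means every host is accepted.
    return includes.isEmpty() || includes.contains(host);
  }

  synchronized boolean isExcluded(String host) {
    return excludes.contains(host);
  }
}
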
+ 53 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java

@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.net.Node;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+class ReplicationWork extends BlockRecoveryWork {
+  public ReplicationWork(BlockInfo block, BlockCollection bc,
+      DatanodeDescriptor[] srcNodes, List<DatanodeDescriptor> containingNodes,
+      List<DatanodeStorageInfo> liveReplicaStorages, int additionalReplRequired,
+      int priority) {
+    super(block, bc, srcNodes, containingNodes,
+        liveReplicaStorages, additionalReplRequired, priority);
+    BlockManager.LOG.debug("Creating a ReplicationWork to recover " + block);
+  }
+
+  @Override
+  void chooseTargets(BlockPlacementPolicy blockplacement,
+      BlockStoragePolicySuite storagePolicySuite,
+      Set<Node> excludedNodes) {
+    assert getSrcNodes().length > 0
+        : "At least 1 source node should have been selected";
+    try {
+      DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
+          getBc().getName(), getAdditionalReplRequired(), getSrcNodes()[0],
+          getLiveReplicaStorages(), false, excludedNodes,
+          getBlock().getNumBytes(),
+          storagePolicySuite.getPolicy(getBc().getStoragePolicyID()));
+      setTargets(chosenTargets);
+    } finally {
+      getSrcNodes()[0].decrementPendingReplicationWithoutTargets();
+    }
+  }
+}

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java

@@ -125,8 +125,8 @@ public class DNConf {
        DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);

    readaheadLength = conf.getLong(
-        DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY,
-        DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY,
+        HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
    dropCacheBehindWrites = conf.getBoolean(
        DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY,
        DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_DEFAULT);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java

@@ -119,7 +119,7 @@ class EditLogBackupInputStream extends EditLogInputStream {

    this.version = version;

-    reader = new FSEditLogOp.Reader(in, tracker, version);
+    reader = FSEditLogOp.Reader.create(in, tracker, version);
  }

  void clear() throws IOException {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java

@@ -157,7 +157,7 @@ public class EditLogFileInputStream extends EditLogInputStream {
               "flags from log");
               "flags from log");
         }
         }
       }
       }
-      reader = new FSEditLogOp.Reader(dataIn, tracker, logVersion);
+      reader = FSEditLogOp.Reader.create(dataIn, tracker, logVersion);
      reader.setMaxOpSize(maxOpSize);
      state = State.OPEN;
    } finally {

+ 40 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java

@@ -17,15 +17,19 @@
 */
 package org.apache.hadoop.hdfs.server.namenode;

+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ChunkedArrayList;

 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.SortedSet;

 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
 import static org.apache.hadoop.util.Time.now;
@@ -103,6 +107,9 @@ class FSDirDeleteOp {
      fsd.checkPermission(pc, iip, false, null, FsAction.WRITE, null,
                          FsAction.ALL, true);
    }
+    if (recursive && fsd.isNonEmptyDirectory(iip)) {
+      checkProtectedDescendants(fsd, fsd.normalizePath(src));
+    }

    return deleteInternal(fsn, src, iip, logRetryCache);
  }
@@ -262,4 +269,37 @@ class FSDirDeleteOp {
    }
    return true;
  }
+
+  /**
+   * Throw if the given directory has any non-empty protected descendants
+   * (including itself).
+   *
+   * @param src directory whose descendants are to be checked. The caller
+   *            must ensure src is not terminated with {@link Path#SEPARATOR}.
+   * @throws AccessControlException if a non-empty protected descendant
+   *                                was found.
+   */
+  private static void checkProtectedDescendants(FSDirectory fsd, String src)
+      throws AccessControlException, UnresolvedLinkException {
+    final SortedSet<String> protectedDirs = fsd.getProtectedDirectories();
+
+    // Is src protected? Caller has already checked it is non-empty.
+    if (protectedDirs.contains(src)) {
+      throw new AccessControlException(
+          "Cannot delete non-empty protected directory " + src);
+    }
+
+    // Are any descendants of src protected?
+    // The subSet call returns only the descendants of src since
+    // {@link Path#SEPARATOR} is "/" and '0' is the next ASCII
+    // character after '/'.
+    for (String descendant :
+            protectedDirs.subSet(src + Path.SEPARATOR, src + "0")) {
+      if (fsd.isNonEmptyDirectory(fsd.getINodesInPath4Write(
+              descendant, false))) {
+        throw new AccessControlException(
+            "Cannot delete non-empty protected subdirectory " + descendant);
+      }
+    }
+  }
 }
 }

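checkProtectedDescendants above relies on the ordering of a sorted set of normalized paths: because '0' is the next character after '/' in ASCII, subSet(src + "/", src + "0") yields exactly the entries under src and nothing else, while src itself is checked separately before the range scan. A small self-contained demonstration (the paths are made up for illustration):

// Illustrative only: the TreeSet range trick used to find protected descendants.
import java.util.SortedSet;
import java.util.TreeSet;

public class ProtectedDirRangeDemo {
  public static void main(String[] args) {
    SortedSet<String> protectedDirs = new TreeSet<>();
    protectedDirs.add("/data");
    protectedDirs.add("/data/warehouse");
    protectedDirs.add("/database");   // shares the prefix but is a sibling
    protectedDirs.add("/tmp");

    String src = "/data";
    // Prints only [/data/warehouse]: "/database" sorts after "/data0",
    // and "/data" itself sorts before "/data/".
    System.out.println(protectedDirs.subSet(src + "/", src + "0"));
  }
}
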
+ 63 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -70,9 +70,12 @@ import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
+import java.util.SortedSet;
+import java.util.TreeSet;
 import java.util.concurrent.locks.ReentrantReadWriteLock;

 import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_DEFAULT;
@@ -137,6 +140,13 @@ public class FSDirectory implements Closeable {

  private final int inodeXAttrsLimit; //inode xattrs max limit

+  // A set of directories that have been protected using the
+  // dfs.namenode.protected.directories setting. These directories cannot
+  // be deleted unless they are empty.
+  //
+  // Each entry in this set must be a normalized path.
+  private final SortedSet<String> protectedDirectories;
+
  // lock to protect the directory and BlockMap
  private final ReentrantReadWriteLock dirLock;

@@ -281,6 +291,8 @@ public class FSDirectory implements Closeable {
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);

+    this.protectedDirectories = parseProtectedDirectories(conf);
+
    Preconditions.checkArgument(this.inodeXAttrsLimit >= 0,
        "Cannot set a negative limit on the number of xattrs per inode (%s).",
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY);
@@ -309,6 +321,25 @@ public class FSDirectory implements Closeable {
    return namesystem;
  }

+  /**
+   * Parse configuration setting dfs.namenode.protected.directories to
+   * retrieve the set of protected directories.
+   *
+   * @param conf
+   * @return a TreeSet
+   */
+  @VisibleForTesting
+  static SortedSet<String> parseProtectedDirectories(Configuration conf) {
+    // Normalize each input path to guard against administrator error.
+    return new TreeSet<>(normalizePaths(
+        conf.getTrimmedStringCollection(FS_PROTECTED_DIRECTORIES),
+        FS_PROTECTED_DIRECTORIES));
+  }
+
+  SortedSet<String> getProtectedDirectories() {
+    return protectedDirectories;
+  }
+
  BlockManager getBlockManager() {
    return getFSNamesystem().getBlockManager();
  }
@@ -909,6 +940,38 @@ public class FSDirectory implements Closeable {
        && INodeReference.tryRemoveReference(last) > 0) ? 0 : 1;
  }

+  /**
+   * Return a new collection of normalized paths from the given input
+   * collection. The input collection is unmodified.
+   *
+   * Reserved paths, relative paths and paths with scheme are ignored.
+   *
+   * @param paths collection whose contents are to be normalized.
+   * @return collection with all input paths normalized.
+   */
+  static Collection<String> normalizePaths(Collection<String> paths,
+                                           String errorString) {
+    if (paths.isEmpty()) {
+      return paths;
+    }
+    final Collection<String> normalized = new ArrayList<>(paths.size());
+    for (String dir : paths) {
+      if (isReservedName(dir)) {
+        LOG.error("{} ignoring reserved path {}", errorString, dir);
+      } else {
+        final Path path = new Path(dir);
+        if (!path.isAbsolute()) {
+          LOG.error("{} ignoring relative path {}", errorString, dir);
+        } else if (path.toUri().getScheme() != null) {
+          LOG.error("{} ignoring path {} with scheme", errorString, dir);
+        } else {
+          normalized.add(path.toString());
+        }
+      }
+    }
+    return normalized;
+  }
+
  static String normalizePath(String src) {
    if (src.length() > 1 && src.endsWith("/")) {
      src = src.substring(0, src.length() - 1);

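normalizePaths above drops reserved, relative and scheme-qualified entries from dfs.namenode.protected.directories before they land in the TreeSet, logging each rejection. The stand-in below sketches those filtering rules using java.net.URI and a hard-coded reserved prefix instead of the Hadoop Path class and isReservedName, so it is an approximation of the behavior, not the actual implementation.

// Illustrative only: filtering and normalizing administrator-supplied paths.
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

public class NormalizePathsSketch {
  static Collection<String> normalize(Collection<String> paths) {
    Collection<String> out = new ArrayList<>(paths.size());
    for (String dir : paths) {
      URI uri = URI.create(dir);
      if (dir.startsWith("/.reserved")) {
        System.err.println("ignoring reserved path " + dir);
      } else if (uri.getScheme() != null) {
        System.err.println("ignoring path with scheme " + dir);
      } else if (!dir.startsWith("/")) {
        System.err.println("ignoring relative path " + dir);
      } else {
        // Strip a single trailing slash, mirroring normalizePath().
        out.add(dir.length() > 1 && dir.endsWith("/")
            ? dir.substring(0, dir.length() - 1) : dir);
      }
    }
    return out;
  }

  public static void main(String[] args) {
    // Prints [/apps/hive]; the other three entries are rejected.
    System.out.println(normalize(Arrays.asList(
        "/apps/hive/", "data/tmp", "hdfs://nn1/apps", "/.reserved/raw")));
  }
}
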
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -980,7 +980,7 @@ public class FSEditLogLoader {

      oldLastBlock.setNumBytes(pBlock.getNumBytes());
      if (!oldLastBlock.isComplete()) {
-        fsNamesys.getBlockManager().forceCompleteBlock(file, oldLastBlock);
+        fsNamesys.getBlockManager().forceCompleteBlock(oldLastBlock);
        fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
      }
    } else { // the penultimate block is null
@@ -1041,7 +1041,7 @@ public class FSEditLogLoader {
      if (!oldBlock.isComplete() &&
          (!isLastBlock || op.shouldCompleteLastBlock())) {
        changeMade = true;
-        fsNamesys.getBlockManager().forceCompleteBlock(file, oldBlock);
+        fsNamesys.getBlockManager().forceCompleteBlock(oldBlock);
      }
      if (changeMade) {
        // The state or gen-stamp of the block has changed. So, we may be

+ 254 - 100
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -4518,42 +4518,46 @@ public abstract class FSEditLogOp {
  /**
   * Class for reading editlog ops from a stream
   */
-  public static class Reader {
-    private final DataInputStream in;
-    private final StreamLimiter limiter;
-    private final int logVersion;
-    private final Checksum checksum;
-    private final OpInstanceCache cache;
-    private int maxOpSize;
-    private final boolean supportEditLogLength;
+  public abstract static class Reader {
+    final DataInputStream in;
+    final StreamLimiter limiter;
+    final OpInstanceCache cache;
+    final byte[] temp = new byte[4096];
+    final int logVersion;
+    int maxOpSize;
+
+    public static Reader create(DataInputStream in, StreamLimiter limiter,
+                                int logVersion) {
+      if (logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION) {
+        // Use the LengthPrefixedReader on edit logs which are newer than what
+        // we can parse.  (Newer layout versions are represented by smaller
+        // negative integers, for historical reasons.) Even though we can't
+        // parse the Ops contained in them, we should still be able to call
+        // scanOp on them.  This is important for the JournalNode during rolling
+        // upgrade.
+        return new LengthPrefixedReader(in, limiter, logVersion);
+      } else if (NameNodeLayoutVersion.supports(
+              NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)) {
+        return new LengthPrefixedReader(in, limiter, logVersion);
+      } else if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.EDITS_CHECKSUM, logVersion)) {
+        Checksum checksum = DataChecksum.newCrc32();
+        return new ChecksummedReader(checksum, in, limiter, logVersion);
+      } else {
+        return new LegacyReader(in, limiter, logVersion);
+      }
+    }

    /**
     * Construct the reader
-     * @param in The stream to read from.
-     * @param logVersion The version of the data coming from the stream.
+     * @param in            The stream to read from.
+     * @param limiter       The limiter for this stream.
+     * @param logVersion    The version of the data coming from the stream.
     */
-    public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
-      this.logVersion = logVersion;
-      if (NameNodeLayoutVersion.supports(
-          LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
-        this.checksum = DataChecksum.newCrc32();
-      } else {
-        this.checksum = null;
-      }
-      // It is possible that the logVersion is actually a future layoutversion
-      // during the rolling upgrade (e.g., the NN gets upgraded first). We
-      // assume future layout will also support length of editlog op.
-      this.supportEditLogLength = NameNodeLayoutVersion.supports(
-          NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
-          || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
-
-      if (this.checksum != null) {
-        this.in = new DataInputStream(
-            new CheckedInputStream(in, this.checksum));
-      } else {
-        this.in = in;
-      }
+    Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
+      this.in = in;
      this.limiter = limiter;
+      this.logVersion = logVersion;
      this.cache = new OpInstanceCache();
      this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
    }
@@ -4606,26 +4610,25 @@ public abstract class FSEditLogOp {
      }
    }

-    private void verifyTerminator() throws IOException {
+    void verifyTerminator() throws IOException {
      /** The end of the edit log should contain only 0x00 or 0xff bytes.
       * If it contains other bytes, the log itself may be corrupt.
       * It is important to check this; if we don't, a stray OP_INVALID byte
       * could make us stop reading the edit log halfway through, and we'd never
       * know that we had lost data.
       */
-      byte[] buf = new byte[4096];
      limiter.clearLimit();
      int numRead = -1, idx = 0;
      while (true) {
        try {
          numRead = -1;
          idx = 0;
-          numRead = in.read(buf);
+          numRead = in.read(temp);
          if (numRead == -1) {
            return;
          }
          while (idx < numRead) {
-            if ((buf[idx] != (byte)0) && (buf[idx] != (byte)-1)) {
+            if ((temp[idx] != (byte)0) && (temp[idx] != (byte)-1)) {
               throw new IOException("Read extra bytes after " +
               throw new IOException("Read extra bytes after " +
                 "the terminator!");
                 "the terminator!");
             }
             }
@@ -4638,7 +4641,7 @@ public abstract class FSEditLogOp {
          if (numRead != -1) {
            in.reset();
            IOUtils.skipFully(in, idx);
-            in.mark(buf.length + 1);
+            in.mark(temp.length + 1);
            IOUtils.skipFully(in, 1);
          }
        }
@@ -4653,14 +4656,164 @@ public abstract class FSEditLogOp {
     * If an exception is thrown, the stream's mark will be set to the first
     * problematic byte.  This usually means the beginning of the opcode.
     */
-    private FSEditLogOp decodeOp() throws IOException {
-      limiter.setLimit(maxOpSize);
+    public abstract FSEditLogOp decodeOp() throws IOException;
+
+    /**
+     * Similar to decodeOp(), but we only retrieve the transaction ID of the
+     * Op rather than reading it.  If the edit log format supports length
+     * prefixing, this can be much faster than full decoding.
+     *
+     * @return the last txid of the segment, or INVALID_TXID on EOF.
+     */
+    public abstract long scanOp() throws IOException;
+  }
+
+  /**
+   * Reads edit logs which are prefixed with a length.  These edit logs also
+   * include a checksum and transaction ID.
+   */
+  private static class LengthPrefixedReader extends Reader {
+    /**
+     * The minimum length of a length-prefixed Op.
+     *
+     * The minimum Op has:
+     * 1-byte opcode
+     * 4-byte length
+     * 8-byte txid
+     * 0-byte body
+     * 4-byte checksum
+     */
+    private static final int MIN_OP_LENGTH = 17;
+
+    /**
+     * The op id length.
+     *
+     * Not included in the stored length.
+     */
+    private static final int OP_ID_LENGTH = 1;
+
+    /**
+     * The checksum length.
+     *
+     * Not included in the stored length.
+     */
+    private static final int CHECKSUM_LENGTH = 4;
+
+    private final Checksum checksum;
+
+    LengthPrefixedReader(DataInputStream in, StreamLimiter limiter,
+                         int logVersion) {
+      super(in, limiter, logVersion);
+      this.checksum = DataChecksum.newCrc32();
+    }
+
+    @Override
+    public FSEditLogOp decodeOp() throws IOException {
+      long txid = decodeOpFrame();
+      if (txid == HdfsServerConstants.INVALID_TXID) {
+        return null;
+      }
+      in.reset();
       in.mark(maxOpSize);
+      FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(in.readByte());
+      FSEditLogOp op = cache.get(opCode);
+      if (op == null) {
+        throw new IOException("Read invalid opcode " + opCode);
+      }
+      op.setTransactionId(txid);
+      IOUtils.skipFully(in, 4 + 8); // skip length and txid
+      op.readFields(in, logVersion);
+      // skip over the checksum, which we validated above.
+      IOUtils.skipFully(in, CHECKSUM_LENGTH);
+      return op;
+    }
+
+    @Override
+    public long scanOp() throws IOException {
+      return decodeOpFrame();
+    }
 
-      if (checksum != null) {
-        checksum.reset();
+    /**
+     * Decode the opcode "frame".  This includes reading the opcode and
+     * transaction ID, and validating the checksum and length.  It does not
+     * include reading the opcode-specific fields.
+     * The input stream will be advanced to the end of the op at the end of this
+     * function.
+     *
+     * @return        the transaction ID of the op, or
+     *                  HdfsServerConstants.INVALID_TXID if we hit EOF.
+     */
+    private long decodeOpFrame() throws IOException {
+      limiter.setLimit(maxOpSize);
+      in.mark(maxOpSize);
+      byte opCodeByte;
+      try {
+        opCodeByte = in.readByte();
+      } catch (EOFException eof) {
+        // EOF at an opcode boundary is expected.
+        return HdfsServerConstants.INVALID_TXID;
       }
+      if (opCodeByte == FSEditLogOpCodes.OP_INVALID.getOpCode()) {
+        verifyTerminator();
+        return HdfsServerConstants.INVALID_TXID;
+      }
+      // Here, we verify that the Op size makes sense and that the
+      // data matches its checksum before attempting to construct an Op.
+      // This is important because otherwise we may encounter an
+      // OutOfMemoryException which could bring down the NameNode or
+      // JournalNode when reading garbage data.
+      int opLength =  in.readInt() + OP_ID_LENGTH + CHECKSUM_LENGTH;
+      if (opLength > maxOpSize) {
+        throw new IOException("Op " + (int)opCodeByte + " has size " +
+            opLength + ", but maxOpSize = " + maxOpSize);
+      } else  if (opLength < MIN_OP_LENGTH) {
+        throw new IOException("Op " + (int)opCodeByte + " has size " +
+            opLength + ", but the minimum op size is " + MIN_OP_LENGTH);
+      }
+      long txid = in.readLong();
+      // Verify checksum
+      in.reset();
+      in.mark(maxOpSize);
+      checksum.reset();
+      for (int rem = opLength - CHECKSUM_LENGTH; rem > 0;) {
+        int toRead = Math.min(temp.length, rem);
+        IOUtils.readFully(in, temp, 0, toRead);
+        checksum.update(temp, 0, toRead);
+        rem -= toRead;
+      }
+      int expectedChecksum = in.readInt();
+      int calculatedChecksum = (int)checksum.getValue();
+      if (expectedChecksum != calculatedChecksum) {
+        throw new ChecksumException(
+            "Transaction is corrupt. Calculated checksum is " +
+            calculatedChecksum + " but read checksum " +
+            expectedChecksum, txid);
+      }
+      return txid;
+    }
+  }
+
+  /**
+   * Read edit logs which have a checksum and a transaction ID, but not a
+   * length.
+   */
+  private static class ChecksummedReader extends Reader {
+    private final Checksum checksum;
 
+    ChecksummedReader(Checksum checksum, DataInputStream in,
+                      StreamLimiter limiter, int logVersion) {
+      super(new DataInputStream(
+          new CheckedInputStream(in, checksum)), limiter, logVersion);
+      this.checksum = checksum;
+    }
+
+    @Override
+    public FSEditLogOp decodeOp() throws IOException {
+      limiter.setLimit(maxOpSize);
+      in.mark(maxOpSize);
+      // Reset the checksum.  Since we are using a CheckedInputStream, each
+      // subsequent read from the  stream will update the checksum.
+      checksum.reset();
       byte opCodeByte;
       byte opCodeByte;
       try {
         opCodeByte = in.readByte();
@@ -4668,88 +4821,89 @@ public abstract class FSEditLogOp {
         // EOF at an opcode boundary is expected.
         return null;
       }
-
       FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
       if (opCode == OP_INVALID) {
         verifyTerminator();
         return null;
       }
-
       FSEditLogOp op = cache.get(opCode);
       if (op == null) {
         throw new IOException("Read invalid opcode " + opCode);
       }
-
-      if (supportEditLogLength) {
-        in.readInt();
+      op.setTransactionId(in.readLong());
+      op.readFields(in, logVersion);
+      // Verify checksum
+      int calculatedChecksum = (int)checksum.getValue();
+      int expectedChecksum = in.readInt();
+      if (expectedChecksum != calculatedChecksum) {
+        throw new ChecksumException(
+            "Transaction is corrupt. Calculated checksum is " +
+                calculatedChecksum + " but read checksum " +
+                expectedChecksum, op.txid);
       }
+      return op;
+    }
 
+    @Override
+    public long scanOp() throws IOException {
+      // Edit logs of this age don't have any length prefix, so we just have
+      // to read the entire Op.
+      FSEditLogOp op = decodeOp();
+      return op == null ?
+          HdfsServerConstants.INVALID_TXID : op.getTransactionId();
+    }
+  }
+
+  /**
+   * Read older edit logs which may or may not have transaction IDs and other
+   * features.  This code is used during upgrades and to allow HDFS INotify to
+   * read older edit log files.
+   */
+  private static class LegacyReader extends Reader {
+    LegacyReader(DataInputStream in,
+                      StreamLimiter limiter, int logVersion) {
+      super(in, limiter, logVersion);
+    }
+
+    @Override
+    public FSEditLogOp decodeOp() throws IOException {
+      limiter.setLimit(maxOpSize);
+      in.mark(maxOpSize);
+      byte opCodeByte;
+      try {
+        opCodeByte = in.readByte();
+      } catch (EOFException eof) {
+        // EOF at an opcode boundary is expected.
+        return null;
+      }
+      FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
+      if (opCode == OP_INVALID) {
+        verifyTerminator();
+        return null;
+      }
+      FSEditLogOp op = cache.get(opCode);
+      if (op == null) {
+        throw new IOException("Read invalid opcode " + opCode);
+      }
       if (NameNodeLayoutVersion.supports(
-          LayoutVersion.Feature.STORED_TXIDS, logVersion)) {
-        // Read the txid
+            LayoutVersion.Feature.STORED_TXIDS, logVersion)) {
         op.setTransactionId(in.readLong());
       } else {
         op.setTransactionId(HdfsServerConstants.INVALID_TXID);
       }
-
       op.readFields(in, logVersion);
-
-      validateChecksum(in, checksum, op.txid);
       return op;
     }
 
-    /**
-     * Similar with decodeOp(), but instead of doing the real decoding, we skip
-     * the content of the op if the length of the editlog is supported.
-     * @return the last txid of the segment, or INVALID_TXID on exception
-     */
+    @Override
     public long scanOp() throws IOException {
-      if (supportEditLogLength) {
-        limiter.setLimit(maxOpSize);
-        in.mark(maxOpSize);
-
-        final byte opCodeByte;
-        try {
-          opCodeByte = in.readByte(); // op code
-        } catch (EOFException e) {
-          return HdfsServerConstants.INVALID_TXID;
-        }
-
-        FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
-        if (opCode == OP_INVALID) {
-          verifyTerminator();
-          return HdfsServerConstants.INVALID_TXID;
-        }
-
-        int length = in.readInt(); // read the length of the op
-        long txid = in.readLong(); // read the txid
-
-        // skip the remaining content
-        IOUtils.skipFully(in, length - 8); 
-        // TODO: do we want to verify checksum for JN? For now we don't.
-        return txid;
-      } else {
-        FSEditLogOp op = decodeOp();
-        return op == null ? HdfsServerConstants.INVALID_TXID : op.getTransactionId();
-      }
-    }
-
-    /**
-     * Validate a transaction's checksum
-     */
-    private void validateChecksum(DataInputStream in,
-                                  Checksum checksum,
-                                  long txid)
-        throws IOException {
-      if (checksum != null) {
-        int calculatedChecksum = (int)checksum.getValue();
-        int readChecksum = in.readInt(); // read in checksum
-        if (readChecksum != calculatedChecksum) {
-          throw new ChecksumException(
-              "Transaction is corrupt. Calculated checksum is " +
-              calculatedChecksum + " but read checksum " + readChecksum, txid);
-        }
+      if (!NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.STORED_TXIDS, logVersion)) {
+        throw new IOException("Can't scan a pre-transactional edit log.");
       }
+      FSEditLogOp op = decodeOp();
+      return op == null ?
+          HdfsServerConstants.INVALID_TXID : op.getTransactionId();
     }
   }
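
For orientation, the sketch below shows how a caller might drive the refactored reader hierarchy above; it is only an illustration, not part of the patch. It assumes the `FSEditLogOp.Reader.create(in, limiter, logVersion)` factory that the TestEditLog hunk further down switches to, and the `scanOp()` contract documented above (it returns `HdfsServerConstants.INVALID_TXID` at the end of a segment). The class is placed in the namenode package because the reader types are package-private.

    // Illustrative sketch only, under the assumptions stated above.
    package org.apache.hadoop.hdfs.server.namenode;

    import java.io.DataInputStream;
    import java.io.IOException;

    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

    class EditLogScanSketch {
      /** Scan a segment and return the highest txid seen, without fully decoding ops. */
      static long scanHighestTxId(FSEditLogLoader.PositionTrackingInputStream tracker,
                                  int logVersion) throws IOException {
        DataInputStream in = new DataInputStream(tracker);
        // The factory picks LengthPrefixedReader, ChecksummedReader, or LegacyReader
        // based on the layout version of the log being read.
        FSEditLogOp.Reader reader = FSEditLogOp.Reader.create(in, tracker, logVersion);
        long highest = HdfsServerConstants.INVALID_TXID;
        long txid;
        // For length-prefixed logs, scanOp() validates the frame length and checksum
        // without constructing each op; older formats fall back to full decoding.
        while ((txid = reader.scanOp()) != HdfsServerConstants.INVALID_TXID) {
          highest = txid;
        }
        return highest;
      }
    }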
 
 

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -51,6 +51,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -1967,7 +1968,7 @@ public class DFSAdmin extends FsShell {
 
     // Create the client
     ClientDatanodeProtocol dnProtocol =     
-        DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
+        DFSUtilClient.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
            NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
    return dnProtocol;
  }

+ 25 - 0
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md

@@ -221,6 +221,31 @@ Below are examples using the `curl` command tool.
 
 See also: [Authentication for Hadoop HTTP web-consoles](../hadoop-common/HttpAuthentication.html)
 
+Additionally, WebHDFS supports OAuth2 on the client side. The Namenode and Datanodes do not currently support clients using OAuth2 but other backends that implement the WebHDFS REST interface may.
+
+WebHDFS supports two types of OAuth2 code grants (user-provided refresh and access token, or user-provided credential) by default and provides a pluggable mechanism for implementing other OAuth2 authentications per the [OAuth2 RFC](https://tools.ietf.org/html/rfc6749), or custom authentications.  When using either of the provided code grant mechanisms, the WebHDFS client will refresh the access token as necessary.
+
+OAuth2 should only be enabled for clients not running with Kerberos SPNEGO.
+
+| OAuth2 code grant mechanism | Description | Value of `dfs.webhdfs.oauth2.access.token.provider` that implements code grant |
+|:---- |:---- |:----|
+| Authorization Code Grant | The user provides an initial access token and refresh token, which are then used to authenticate WebHDFS requests and obtain replacement access tokens, respectively. | org.apache.hadoop.hdfs.web.oauth2.ConfRefreshTokenBasedAccessTokenProvider |
+| Client Credentials Grant | The user provides a credential which is used to obtain access tokens, which are then used to authenticate WebHDFS requests. | org.apache.hadoop.hdfs.web.oauth2.ConfCredentialBasedAccessTokenProvider |
+
+
+The following properties control OAuth2 authentication.
+
+| OAuth2 related property | Description |
+|:---- |:---- |
+| `dfs.webhdfs.oauth2.enabled` | Boolean to enable/disable OAuth2 authentication |
+| `dfs.webhdfs.oauth2.access.token.provider` | Class name of an implementation of `org.apache.hadoop.hdfs.web.oauth2.AccessTokenProvider`.  Two are provided with the code, as described above, or the user may specify a user-provided implementation. The default value for this configuration key is the `ConfCredentialBasedAccessTokenProvider` implementation. |
+| `dfs.webhdfs.oauth2.client.id` | Client id used to obtain access token with either credential or refresh token |
+| `dfs.webhdfs.oauth2.refresh.url` | URL against which to post for obtaining bearer token with either credential or refresh token |
+| `dfs.webhdfs.oauth2.access.token` | (required if using ConfRefreshTokenBasedAccessTokenProvider) Initial access token with which to authenticate |
+| `dfs.webhdfs.oauth2.refresh.token` | (required if using ConfRefreshTokenBasedAccessTokenProvider) Initial refresh token to use to obtain new access tokens  |
+| `dfs.webhdfs.oauth2.refresh.token.expires.ms.since.epoch` | (required if using ConfRefreshTokenBasedAccessTokenProvider) Access token expiration measured in milliseconds since Jan 1, 1970.  *Note this is a different value than provided by OAuth providers and has been munged as described in interface to be suitable for a client application*  |
+| `dfs.webhdfs.oauth2.credential` | (required if using ConfCredentialBasedAccessTokenProvider).  Credential used to obtain initial and subsequent access tokens. |
+
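As a rough sketch of how these properties fit together (not an official example), a client using the refresh-token grant might build its configuration as shown below. The property names come from the tables above; the client id, refresh URL, token values, and expiration are placeholders.

    // Sketch only: property names are from the tables above; all values are
    // placeholders.  The provider class is the ConfRefreshTokenBasedAccessTokenProvider
    // listed in the code grant table.
    import org.apache.hadoop.conf.Configuration;

    public class WebHdfsOAuth2ConfSketch {
      static Configuration oauth2Conf() {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.webhdfs.oauth2.enabled", true);
        conf.set("dfs.webhdfs.oauth2.access.token.provider",
            "org.apache.hadoop.hdfs.web.oauth2.ConfRefreshTokenBasedAccessTokenProvider");
        conf.set("dfs.webhdfs.oauth2.client.id", "example-client-id");
        conf.set("dfs.webhdfs.oauth2.refresh.url", "https://idp.example.com/oauth2/token");
        conf.set("dfs.webhdfs.oauth2.access.token", "initial-access-token");
        conf.set("dfs.webhdfs.oauth2.refresh.token", "initial-refresh-token");
        conf.setLong("dfs.webhdfs.oauth2.refresh.token.expires.ms.since.epoch",
            1500000000000L);
        return conf;
      }
    }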
 Proxy Users
 -----------
 

+ 15 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java

@@ -247,7 +247,7 @@ public class TestBlockReaderLocal {
   @Test
   @Test
   public void testBlockReaderSimpleReads() throws IOException {
   public void testBlockReaderSimpleReads() throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), true,
     runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), true,
-        DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
 
 
   @Test
   @Test
@@ -259,7 +259,7 @@ public class TestBlockReaderLocal {
   @Test
   @Test
   public void testBlockReaderSimpleReadsNoChecksum() throws IOException {
   public void testBlockReaderSimpleReadsNoChecksum() throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), false,
     runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), false,
-        DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
 
 
   @Test
   @Test
@@ -297,14 +297,14 @@ public class TestBlockReaderLocal {
   @Test
   @Test
   public void testBlockReaderLocalArrayReads2() throws IOException {
   public void testBlockReaderLocalArrayReads2() throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalArrayReads2(),
     runBlockReaderLocalTest(new TestBlockReaderLocalArrayReads2(),
-        true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
 
 
   @Test
   @Test
   public void testBlockReaderLocalArrayReads2NoChecksum()
   public void testBlockReaderLocalArrayReads2NoChecksum()
       throws IOException {
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalArrayReads2(),
     runBlockReaderLocalTest(new TestBlockReaderLocalArrayReads2(),
-        false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        false, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
 
 
   @Test
   @Test
@@ -341,7 +341,7 @@ public class TestBlockReaderLocal {
   public void testBlockReaderLocalByteBufferReads()
   public void testBlockReaderLocalByteBufferReads()
       throws IOException {
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalByteBufferReads(),
     runBlockReaderLocalTest(new TestBlockReaderLocalByteBufferReads(),
-        true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
 
 
   @Test
   @Test
@@ -349,7 +349,7 @@ public class TestBlockReaderLocal {
       throws IOException {
       throws IOException {
     runBlockReaderLocalTest(
     runBlockReaderLocalTest(
         new TestBlockReaderLocalByteBufferReads(),
         new TestBlockReaderLocalByteBufferReads(),
-        false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        false, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
   
   
   @Test
   @Test
@@ -473,7 +473,7 @@ public class TestBlockReaderLocal {
   public void testBlockReaderLocalReadCorruptStart()
   public void testBlockReaderLocalReadCorruptStart()
       throws IOException {
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalReadCorruptStart(), true,
     runBlockReaderLocalTest(new TestBlockReaderLocalReadCorruptStart(), true,
-        DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
   
   
   private static class TestBlockReaderLocalReadCorrupt
   private static class TestBlockReaderLocalReadCorrupt
@@ -524,14 +524,14 @@ public class TestBlockReaderLocal {
   public void testBlockReaderLocalReadCorrupt()
   public void testBlockReaderLocalReadCorrupt()
       throws IOException {
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalReadCorrupt(), true,
     runBlockReaderLocalTest(new TestBlockReaderLocalReadCorrupt(), true,
-        DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
 
 
   @Test
   @Test
   public void testBlockReaderLocalReadCorruptNoChecksum()
   public void testBlockReaderLocalReadCorruptNoChecksum()
       throws IOException {
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalReadCorrupt(), false,
     runBlockReaderLocalTest(new TestBlockReaderLocalReadCorrupt(), false,
-        DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
 
 
   @Test
   @Test
@@ -576,14 +576,14 @@ public class TestBlockReaderLocal {
   public void testBlockReaderLocalWithMlockChanges()
   public void testBlockReaderLocalWithMlockChanges()
       throws IOException {
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalWithMlockChanges(),
     runBlockReaderLocalTest(new TestBlockReaderLocalWithMlockChanges(),
-        true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
 
 
   @Test
   @Test
   public void testBlockReaderLocalWithMlockChangesNoChecksum()
   public void testBlockReaderLocalWithMlockChangesNoChecksum()
       throws IOException {
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalWithMlockChanges(),
     runBlockReaderLocalTest(new TestBlockReaderLocalWithMlockChanges(),
-        false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        false, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
 
 
   @Test
   @Test
@@ -649,14 +649,14 @@ public class TestBlockReaderLocal {
   public void testBlockReaderLocalOnFileWithoutChecksum()
   public void testBlockReaderLocalOnFileWithoutChecksum()
       throws IOException {
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalOnFileWithoutChecksum(),
     runBlockReaderLocalTest(new TestBlockReaderLocalOnFileWithoutChecksum(),
-        true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
 
 
   @Test
   @Test
   public void testBlockReaderLocalOnFileWithoutChecksumNoChecksum()
   public void testBlockReaderLocalOnFileWithoutChecksumNoChecksum()
       throws IOException {
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalOnFileWithoutChecksum(),
     runBlockReaderLocalTest(new TestBlockReaderLocalOnFileWithoutChecksum(),
-        false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        false, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
 
 
   @Test
   @Test
@@ -677,14 +677,14 @@ public class TestBlockReaderLocal {
   public void testBlockReaderLocalReadZeroBytes()
   public void testBlockReaderLocalReadZeroBytes()
       throws IOException {
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalReadZeroBytes(),
     runBlockReaderLocalTest(new TestBlockReaderLocalReadZeroBytes(),
-        true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
 
 
   @Test
   @Test
   public void testBlockReaderLocalReadZeroBytesNoChecksum()
   public void testBlockReaderLocalReadZeroBytesNoChecksum()
       throws IOException {
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalReadZeroBytes(),
     runBlockReaderLocalTest(new TestBlockReaderLocalReadZeroBytes(),
-        false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        false, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
   }
 
 
   @Test
   @Test

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java

@@ -182,7 +182,7 @@ public class TestBlockReaderLocalLegacy {
     {
       final LocatedBlock lb = cluster.getNameNode().getRpcServer()
           .getBlockLocations(path.toString(), 0, 1).get(0);
-      proxy = DFSUtil.createClientDatanodeProtocolProxy(
+      proxy = DFSUtilClient.createClientDatanodeProtocolProxy(
          lb.getLocations()[0], conf, 60000, false);
      token = lb.getBlockToken();
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java

@@ -24,10 +24,10 @@ import static org.mockito.Mockito.verify;
 
 import java.util.List;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -41,7 +41,7 @@ public class TestClientBlockVerification {
   static LocatedBlock testBlock = null;
 
   static {
-    ((Log4JLogger)RemoteBlockReader2.LOG).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(RemoteBlockReader2.LOG, Level.ALL);
   }
   @BeforeClass
   public static void setupCluster() throws Exception {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -860,7 +860,7 @@ public class TestDFSClientRetries {
     ClientDatanodeProtocol proxy = null;
 
     try {
-      proxy = DFSUtil.createClientDatanodeProtocolProxy(
+      proxy = DFSUtilClient.createClientDatanodeProtocolProxy(
           fakeDnId, conf, 500, false, fakeBlock);
 
       proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));

+ 6 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java

@@ -812,16 +812,13 @@ public class TestDecommission {
       }
       assertEquals("Number of live nodes should be 0", 0, info.length);
       
-      // Test that non-live and bogus hostnames are considered "dead".
-      // The dead report should have an entry for (1) the DN  that is
-      // now considered dead because it is no longer allowed to connect
-      // and (2) the bogus entry in the hosts file (these entries are
-      // always added last)
+      // Test that bogus hostnames are considered "dead".
+      // The dead report should have an entry for the bogus entry in the hosts
+      // file.  The original datanode is excluded from the report because it
+      // is no longer in the included list.
       info = client.datanodeReport(DatanodeReportType.DEAD);
-      assertEquals("There should be 2 dead nodes", 2, info.length);
-      DatanodeID id = cluster.getDataNodes().get(0).getDatanodeId();
-      assertEquals(id.getHostName(), info[0].getHostName());
-      assertEquals(bogusIp, info[1].getHostName());
+      assertEquals("There should be 1 dead node", 1, info.length);
+      assertEquals(bogusIp, info[0].getHostName());
     }
   }
   

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java

@@ -360,12 +360,12 @@ public class TestPBHelper {
   public void testConvertExtendedBlock() {
     ExtendedBlock b = getExtendedBlock();
     ExtendedBlockProto bProto = PBHelperClient.convert(b);
-    ExtendedBlock b1 = PBHelper.convert(bProto);
+    ExtendedBlock b1 = PBHelperClient.convert(bProto);
     assertEquals(b, b1);
     
     b.setBlockId(-1);
     bProto = PBHelperClient.convert(b);
-    b1 = PBHelper.convert(bProto);
+    b1 = PBHelperClient.convert(bProto);
     assertEquals(b, b1);
   }
   

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java

@@ -43,7 +43,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -57,7 +57,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.Client
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB;
-import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.io.TestWritable;
 import org.apache.hadoop.io.TestWritable;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -138,7 +138,7 @@ public class TestBlockToken {
         BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
         BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
         LOG.info("Got: " + id.toString());
         LOG.info("Got: " + id.toString());
         assertTrue("Received BlockTokenIdentifier is wrong", ident.equals(id));
         assertTrue("Received BlockTokenIdentifier is wrong", ident.equals(id));
-        sm.checkAccess(id, null, PBHelper.convert(req.getBlock()),
+        sm.checkAccess(id, null, PBHelperClient.convert(req.getBlock()),
             BlockTokenIdentifier.AccessMode.WRITE);
             BlockTokenIdentifier.AccessMode.WRITE);
         result = id.getBlockId();
         result = id.getBlockId();
       }
       }
@@ -259,7 +259,7 @@ public class TestBlockToken {
 
 
     ClientDatanodeProtocol proxy = null;
     ClientDatanodeProtocol proxy = null;
     try {
     try {
-      proxy = DFSUtil.createClientDatanodeProtocolProxy(addr, ticket, conf,
+      proxy = DFSUtilClient.createClientDatanodeProtocolProxy(addr, ticket, conf,
           NetUtils.getDefaultSocketFactory(conf));
           NetUtils.getDefaultSocketFactory(conf));
       assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
       assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
     } finally {
     } finally {
@@ -313,7 +313,7 @@ public class TestBlockToken {
     try {
     try {
       long endTime = Time.now() + 3000;
       long endTime = Time.now() + 3000;
       while (Time.now() < endTime) {
       while (Time.now() < endTime) {
-        proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
+        proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
             false, fakeBlock);
             false, fakeBlock);
         assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
         assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
         if (proxy != null) {
         if (proxy != null) {

+ 0 - 30
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java

@@ -196,36 +196,6 @@ public class TestBlockInfoStriped {
     }
   }
 
-  @Test
-  public void testReplaceBlock() {
-    DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(
-        TOTAL_NUM_BLOCKS);
-    Block[] blocks = createReportedBlocks(TOTAL_NUM_BLOCKS);
-    // add block/storage 0, 2, 4 into the BlockInfoStriped
-    for (int i = 0; i < storages.length; i += 2) {
-      Assert.assertEquals(AddBlockResult.ADDED,
-          storages[i].addBlock(info, blocks[i]));
-    }
-
-    BlockInfoStriped newBlockInfo = new BlockInfoStriped(info,
-        info.getErasureCodingPolicy());
-    newBlockInfo.setBlockCollectionId(info.getBlockCollectionId());
-    info.replaceBlock(newBlockInfo);
-
-    // make sure the newBlockInfo is correct
-    byte[] indices = (byte[]) Whitebox.getInternalState(newBlockInfo, "indices");
-    for (int i = 0; i < storages.length; i += 2) {
-      int index = newBlockInfo.findStorageInfo(storages[i]);
-      Assert.assertEquals(i, index);
-      Assert.assertEquals(index, indices[i]);
-
-      // make sure the newBlockInfo is added to the linked list of the storage
-      Assert.assertSame(newBlockInfo, storages[i].getBlockListHeadForTesting());
-      Assert.assertEquals(1, storages[i].numBlocks());
-      Assert.assertNull(newBlockInfo.getNext());
-    }
-  }
-
   @Test
   @Test
   public void testWrite() {
     long blkID = 1;
+ 102 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java

@@ -19,11 +19,13 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 
 import java.io.IOException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.net.URISyntaxException;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URL;
 import java.nio.file.Path;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Iterator;
 import java.util.List;
 import java.util.List;
@@ -38,17 +40,23 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell;
 import org.junit.Assert;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
 import static org.hamcrest.core.Is.is;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;
 import static org.junit.Assert.*;
 
 
@@ -68,6 +76,15 @@ public class TestDatanodeManager {
     return dm;
     return dm;
   }
   }
 
 
+  /**
+   * Create an InetSocketAddress for a host:port string
+   * @param host a host identifier in host:port format
+   * @return a corresponding InetSocketAddress object
+   */
+  private static InetSocketAddress entry(String host) {
+    return HostFileManager.parseEntry("dummy", "dummy", host);
+  }
+
   /**
   /**
    * This test sends a random sequence of node registrations and node removals
    * This test sends a random sequence of node registrations and node removals
    * to the DatanodeManager (of nodes with different IDs and versions), and
    * to the DatanodeManager (of nodes with different IDs and versions), and
@@ -352,5 +369,89 @@ public class TestDatanodeManager {
     assertThat(sortedLocs[sortedLocs.length - 2].getAdminState(),
     assertThat(sortedLocs[sortedLocs.length - 2].getAdminState(),
       is(DatanodeInfo.AdminStates.DECOMMISSIONED));
       is(DatanodeInfo.AdminStates.DECOMMISSIONED));
   }
   }
-}
 
 
+  /**
+   * Test whether removing a host from the includes list without adding it to
+   * the excludes list will exclude it from data node reports.
+   */
+  @Test
+  public void testRemoveIncludedNode() throws IOException {
+    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
+
+    // Set the write lock so that the DatanodeManager can start
+    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+
+    DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
+    HostFileManager hm = new HostFileManager();
+    HostFileManager.HostSet noNodes = new HostFileManager.HostSet();
+    HostFileManager.HostSet oneNode = new HostFileManager.HostSet();
+    HostFileManager.HostSet twoNodes = new HostFileManager.HostSet();
+    DatanodeRegistration dr1 = new DatanodeRegistration(
+      new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-123",
+          12345, 12345, 12345, 12345),
+      new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
+      new ExportedBlockKeys(), "test");
+    DatanodeRegistration dr2 = new DatanodeRegistration(
+      new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-234",
+          23456, 23456, 23456, 23456),
+      new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
+      new ExportedBlockKeys(), "test");
+
+    twoNodes.add(entry("127.0.0.1:12345"));
+    twoNodes.add(entry("127.0.0.1:23456"));
+    oneNode.add(entry("127.0.0.1:23456"));
+
+    hm.refresh(twoNodes, noNodes);
+    Whitebox.setInternalState(dm, "hostFileManager", hm);
+
+    // Register two data nodes to simulate them coming up.
+    // We need to add two nodes, because if we have only one node, removing it
+    // will cause the includes list to be empty, which means all hosts will be
+    // allowed.
+    dm.registerDatanode(dr1);
+    dm.registerDatanode(dr2);
+
+    // Make sure that both nodes are reported
+    List<DatanodeDescriptor> both =
+        dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
+
+    // Sort the list so that we know which one is which
+    Collections.sort(both);
+
+    Assert.assertEquals("Incorrect number of hosts reported",
+        2, both.size());
+    Assert.assertEquals("Unexpected host or host in unexpected position",
+        "127.0.0.1:12345", both.get(0).getInfoAddr());
+    Assert.assertEquals("Unexpected host or host in unexpected position",
+        "127.0.0.1:23456", both.get(1).getInfoAddr());
+
+    // Remove one node from includes, but do not add it to excludes.
+    hm.refresh(oneNode, noNodes);
+
+    // Make sure that only one node is still reported
+    List<DatanodeDescriptor> onlyOne =
+        dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
+
+    Assert.assertEquals("Incorrect number of hosts reported",
+        1, onlyOne.size());
+    Assert.assertEquals("Unexpected host reported",
+        "127.0.0.1:23456", onlyOne.get(0).getInfoAddr());
+
+    // Remove all nodes from includes
+    hm.refresh(noNodes, noNodes);
+
+    // Check that both nodes are reported again
+    List<DatanodeDescriptor> bothAgain =
+        dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
+
+    // Sort the list so that we know which one is which
+    Collections.sort(bothAgain);
+
+    Assert.assertEquals("Incorrect number of hosts reported",
+        2, bothAgain.size());
+    Assert.assertEquals("Unexpected host or host in unexpected position",
+        "127.0.0.1:12345", bothAgain.get(0).getInfoAddr());
+    Assert.assertEquals("Unexpected host or host in unexpected position",
+        "127.0.0.1:23456", bothAgain.get(1).getInfoAddr());
+  }
+}

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java

@@ -104,23 +104,22 @@ public class TestHostFileManager {
     BlockManager bm = mock(BlockManager.class);
     FSNamesystem fsn = mock(FSNamesystem.class);
     Configuration conf = new Configuration();
-    HostFileManager hm = mock(HostFileManager.class);
+    HostFileManager hm = new HostFileManager();
     HostFileManager.HostSet includedNodes = new HostFileManager.HostSet();
     HostFileManager.HostSet excludedNodes = new HostFileManager.HostSet();
 
     includedNodes.add(entry("127.0.0.1:12345"));
     includedNodes.add(entry("localhost:12345"));
     includedNodes.add(entry("127.0.0.1:12345"));
-
     includedNodes.add(entry("127.0.0.2"));
+
     excludedNodes.add(entry("127.0.0.1:12346"));
     excludedNodes.add(entry("127.0.30.1:12346"));
 
     Assert.assertEquals(2, includedNodes.size());
     Assert.assertEquals(2, excludedNodes.size());
 
-    doReturn(includedNodes).when(hm).getIncludes();
-    doReturn(excludedNodes).when(hm).getExcludes();
+    hm.refresh(includedNodes, excludedNodes);
 
     DatanodeManager dm = new DatanodeManager(bm, fsn, conf);
     Whitebox.setInternalState(dm, "hostFileManager", hm);

+ 25 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java

@@ -181,7 +181,7 @@ public class TestReplicationPolicy {
    * considered.
    * considered.
    */
    */
   @Test
   @Test
-  public void testChooseNodeWithMultipleStorages() throws Exception {
+  public void testChooseNodeWithMultipleStorages1() throws Exception {
     updateHeartbeatWithUsage(dataNodes[5],
     updateHeartbeatWithUsage(dataNodes[5],
         2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
         2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
         (2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE)/3, 0L,
         (2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE)/3, 0L,
@@ -200,6 +200,30 @@ public class TestReplicationPolicy {
     resetHeartbeatForStorages();
     resetHeartbeatForStorages();
   }
   }
 
 
+  /**
+   * Test whether all storages on the datanode are considered while
+   * choosing target to place block.
+   */
+  @Test
+  public void testChooseNodeWithMultipleStorages2() throws Exception {
+    updateHeartbeatWithUsage(dataNodes[5],
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        (2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE)/3, 0L,
+        0L, 0L, 0, 0);
+
+    updateHeartbeatForExtraStorage(
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L);
+
+    DatanodeStorageInfo[] targets;
+    targets = chooseTarget (1, dataNodes[5],
+        new ArrayList<DatanodeStorageInfo>(), null);
+    assertEquals(1, targets.length);
+    assertEquals(dataNodes[5], targets[0].getDatanodeDescriptor());
+
+    resetHeartbeatForStorages();
+  }
+
   /**
   /**
    * In this testcase, client is dataNodes[0]. So the 1st replica should be
    * In this testcase, client is dataNodes[0]. So the 1st replica should be
    * placed on dataNodes[0], the 2nd replica should be placed on 
    * placed on dataNodes[0], the 2nd replica should be placed on 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java

@@ -875,7 +875,7 @@ public class TestEditLog {
       tracker = new FSEditLogLoader.PositionTrackingInputStream(in);
       in = new DataInputStream(tracker);
             
-      reader = new FSEditLogOp.Reader(in, tracker, version);
+      reader = FSEditLogOp.Reader.create(in, tracker, version);
     }
   
     @Override

+ 80 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java

@@ -25,19 +25,35 @@ import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.mock;
 
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
 import java.net.HttpURLConnection;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.net.URL;
 import java.util.EnumMap;
 import java.util.EnumMap;
 
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
+import org.junit.Assert;
 import org.junit.Test;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.Mockito;
 
 
 public class TestEditLogFileInputStream {
 public class TestEditLogFileInputStream {
+  private static final Log LOG =
+      LogFactory.getLog(TestEditLogFileInputStream.class);
   private static final byte[] FAKE_LOG_DATA = TestEditLog.HADOOP20_SOME_EDITS;
   private static final byte[] FAKE_LOG_DATA = TestEditLog.HADOOP20_SOME_EDITS;
 
 
+  private final static File TEST_DIR = PathUtils
+      .getTestDir(TestEditLogFileInputStream.class);
+
   @Test
   @Test
   public void testReadURL() throws Exception {
   public void testReadURL() throws Exception {
     HttpURLConnection conn = mock(HttpURLConnection.class);
     HttpURLConnection conn = mock(HttpURLConnection.class);
@@ -63,4 +79,68 @@ public class TestEditLogFileInputStream {
     assertEquals(FAKE_LOG_DATA.length, elis.length());
     assertEquals(FAKE_LOG_DATA.length, elis.length());
     elis.close();
     elis.close();
   }
   }
+
+  /**
+   * Regression test for HDFS-8965 which verifies that
+   * FSEditLogFileInputStream#scanOp verifies Op checksums.
+   */
+  @Test(timeout=60000)
+  public void testScanCorruptEditLog() throws Exception {
+    Configuration conf = new Configuration();
+    File editLog = new File(System.getProperty(
+        "test.build.data", "/tmp"), "testCorruptEditLog");
+
+    LOG.debug("Creating test edit log file: " + editLog);
+    EditLogFileOutputStream elos = new EditLogFileOutputStream(conf,
+        editLog.getAbsoluteFile(), 8192);
+    elos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
+    FSEditLogOp.OpInstanceCache cache = new FSEditLogOp.OpInstanceCache();
+    FSEditLogOp.MkdirOp mkdirOp = FSEditLogOp.MkdirOp.getInstance(cache);
+    mkdirOp.reset();
+    mkdirOp.setRpcCallId(123);
+    mkdirOp.setTransactionId(1);
+    mkdirOp.setInodeId(789L);
+    mkdirOp.setPath("/mydir");
+    PermissionStatus perms = PermissionStatus.createImmutable(
+        "myuser", "mygroup", FsPermission.createImmutable((short)0777));
+    mkdirOp.setPermissionStatus(perms);
+    elos.write(mkdirOp);
+    mkdirOp.reset();
+    mkdirOp.setRpcCallId(456);
+    mkdirOp.setTransactionId(2);
+    mkdirOp.setInodeId(123L);
+    mkdirOp.setPath("/mydir2");
+    perms = PermissionStatus.createImmutable(
+        "myuser", "mygroup", FsPermission.createImmutable((short)0666));
+    mkdirOp.setPermissionStatus(perms);
+    elos.write(mkdirOp);
+    elos.setReadyToFlush();
+    elos.flushAndSync(false);
+    elos.close();
+    long fileLen = editLog.length();
+
+    LOG.debug("Corrupting last 4 bytes of edit log file " + editLog +
+        ", whose length is " + fileLen);
+    RandomAccessFile rwf = new RandomAccessFile(editLog, "rw");
+    rwf.seek(fileLen - 4);
+    int b = rwf.readInt();
+    rwf.seek(fileLen - 4);
+    rwf.writeInt(b + 1);
+    rwf.close();
+
+    EditLogFileInputStream elis = new EditLogFileInputStream(editLog);
+    Assert.assertEquals(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
+        elis.getVersion(true));
+    Assert.assertEquals(1, elis.scanNextOp());
+    LOG.debug("Read transaction 1 from " + editLog);
+    try {
+      elis.scanNextOp();
+      Assert.fail("Expected scanNextOp to fail when op checksum was corrupt.");
+    } catch (IOException e) {
+      LOG.debug("Caught expected checksum error when reading corrupt " +
+          "transaction 2", e);
+      GenericTestUtils.assertExceptionContains("Transaction is corrupt.", e);
+    }
+    elis.close();
+  }
 }
 }

+ 373 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java

@@ -0,0 +1,373 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.AccessControlException;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.*;
+
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Verify that the dfs.namenode.protected.directories setting is respected.
+ */
+public class TestProtectedDirectories {
+  static final Logger LOG = LoggerFactory.getLogger(
+      TestProtectedDirectories.class);
+
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  /**
+   * Start a namenode-only 'cluster' which is configured to protect
+   * the given list of directories.
+   * @param conf
+   * @param protectedDirs
+   * @param unProtectedDirs
+   * @return
+   * @throws IOException
+   */
+  public MiniDFSCluster setupTestCase(Configuration conf,
+                                      Collection<Path> protectedDirs,
+                                      Collection<Path> unProtectedDirs)
+      throws Throwable {
+    // Initialize the configuration.
+    conf.set(
+        CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES,
+        Joiner.on(",").skipNulls().join(protectedDirs));
+
+    // Start the cluster.
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+
+    // Create all the directories.
+    try {
+      cluster.waitActive();
+      FileSystem fs = cluster.getFileSystem();
+      for (Path path : Iterables.concat(protectedDirs, unProtectedDirs)) {
+        fs.mkdirs(path);
+      }
+      return cluster;
+    } catch (Throwable t) {
+      cluster.shutdown();
+      throw t;
+    }
+  }
+
+  /**
+   * Initialize a collection of file system layouts that will be used
+   * as the test matrix.
+   *
+   * @return
+   */
+  private Collection<TestMatrixEntry> createTestMatrix() {
+    Collection<TestMatrixEntry> matrix = new ArrayList<TestMatrixEntry>();
+
+    // single empty unprotected dir.
+    matrix.add(TestMatrixEntry.get()
+        .addUnprotectedDir("/1", true));
+
+    // Single empty protected dir.
+    matrix.add(TestMatrixEntry.get()
+        .addProtectedDir("/1", true));
+
+    // Nested unprotected dirs.
+    matrix.add(TestMatrixEntry.get()
+        .addUnprotectedDir("/1", true)
+        .addUnprotectedDir("/1/2", true)
+        .addUnprotectedDir("/1/2/3", true)
+        .addUnprotectedDir("/1/2/3/4", true));
+
+    // Non-empty protected dir.
+    matrix.add(TestMatrixEntry.get()
+        .addProtectedDir("/1", false)
+        .addUnprotectedDir("/1/2", true));
+
+    // Protected empty child of unprotected parent.
+    matrix.add(TestMatrixEntry.get()
+        .addProtectedDir("/1/2", true)
+        .addUnprotectedDir("/1/2", true));
+
+    // Protected empty child of protected parent.
+    // We should not be able to delete the parent.
+    matrix.add(TestMatrixEntry.get()
+        .addProtectedDir("/1", false)
+        .addProtectedDir("/1/2", true));
+
+    // One of each, non-nested.
+    matrix.add(TestMatrixEntry.get()
+        .addProtectedDir("/1", true)
+        .addUnprotectedDir("/a", true));
+
+    // Protected non-empty child of unprotected parent.
+    // Neither should be deletable.
+    matrix.add(TestMatrixEntry.get()
+        .addProtectedDir("/1/2", false)
+        .addUnprotectedDir("/1/2/3", true)
+        .addUnprotectedDir("/1", false));
+
+    // Protected non-empty child has unprotected siblings.
+    matrix.add(TestMatrixEntry.get()
+        .addProtectedDir("/1/2.2", false)
+        .addUnprotectedDir("/1/2.2/3", true)
+        .addUnprotectedDir("/1/2.1", true)
+        .addUnprotectedDir("/1/2.3", true)
+        .addUnprotectedDir("/1", false));
+
+    // Deeply nested protected child.
+    matrix.add(TestMatrixEntry.get()
+        .addProtectedDir("/1/2/3/4/5", false)
+        .addUnprotectedDir("/1/2/3/4/5/6", true)
+        .addUnprotectedDir("/1", false)
+        .addUnprotectedDir("/1/2", false)
+        .addUnprotectedDir("/1/2/3", false)
+        .addUnprotectedDir("/1/2/3/4", false));
+
+    // Disjoint trees.
+    matrix.add(TestMatrixEntry.get()
+        .addProtectedDir("/1/2", false)
+        .addProtectedDir("/a/b", false)
+        .addUnprotectedDir("/1/2/3", true)
+        .addUnprotectedDir("/a/b/c", true));
+
+    // The following tests exercise special cases in the path prefix
+    // checks and handling of trailing separators.
+
+    // A disjoint non-empty protected dir has the same string prefix as the
+    // directory we are trying to delete.
+    matrix.add(TestMatrixEntry.get()
+        .addProtectedDir("/a1", false)
+        .addUnprotectedDir("/a1/a2", true)
+        .addUnprotectedDir("/a", true));
+
+    // The directory we are trying to delete has a non-empty protected
+    // child and we try to delete it with a trailing separator.
+    matrix.add(TestMatrixEntry.get()
+        .addProtectedDir("/a/b", false)
+        .addUnprotectedDir("/a/b/c", true)
+        .addUnprotectedDir("/a/", false));
+
+    // The directory we are trying to delete has an empty protected
+    // child and we try to delete it with a trailing separator.
+    matrix.add(TestMatrixEntry.get()
+        .addProtectedDir("/a/b", true)
+        .addUnprotectedDir("/a/", true));
+
+    return matrix;
+  }
+
+  @Test
+  public void testAll() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry : createTestMatrix()) {
+      Configuration conf = new HdfsConfiguration();
+      MiniDFSCluster cluster = setupTestCase(
+          conf, testMatrixEntry.getProtectedPaths(),
+          testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path path : testMatrixEntry.getAllPathsToBeDeleted()) {
+          final long countBefore = cluster.getNamesystem().getFilesTotal();
+          assertThat(
+              testMatrixEntry + ": Testing whether " + path + " can be deleted",
+              deletePath(fs, path),
+              is(testMatrixEntry.canPathBeDeleted(path)));
+          final long countAfter = cluster.getNamesystem().getFilesTotal();
+
+          if (!testMatrixEntry.canPathBeDeleted(path)) {
+            assertThat(
+                "A failed delete should be atomic and remove no files",
+                countAfter, is(countBefore));
+          }
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Verify that configured paths are normalized by removing
+   * redundant separators.
+   */
+  @Test
+  public void testProtectedDirNormalization1() {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(
+        CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES,
+        "/foo//bar");
+    Collection<String> paths = FSDirectory.parseProtectedDirectories(conf);
+    assertThat(paths.size(), is(1));
+    assertThat(paths.iterator().next(), is("/foo/bar"));
+  }
+
+  /**
+   * Verify that configured paths are normalized by removing
+   * trailing separators.
+   */
+  @Test
+  public void testProtectedDirNormalization2() {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(
+        CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES,
+        "/a/b/,/c,/d/e/f/");
+    Collection<String> paths = FSDirectory.parseProtectedDirectories(conf);
+
+    for (String path : paths) {
+      assertFalse(path.endsWith("/"));
+    }
+  }
+
+  /**
+   * Verify that configured paths are canonicalized.
+   */
+  @Test
+  public void testProtectedDirIsCanonicalized() {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(
+        CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES,
+        "/foo/../bar/");
+    Collection<String> paths = FSDirectory.parseProtectedDirectories(conf);
+    assertThat(paths.size(), is(1));
+    assertThat(paths.iterator().next(), is("/bar"));
+  }
+
+  /**
+   * Verify that the root directory in the configuration is correctly handled.
+   */
+  @Test
+  public void testProtectedRootDirectory() {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(
+        CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES, "/");
+    Collection<String> paths = FSDirectory.parseProtectedDirectories(conf);
+    assertThat(paths.size(), is(1));
+    assertThat(paths.iterator().next(), is("/"));
+  }
+
+  /**
+   * Verify that invalid paths in the configuration are filtered out.
+   * (Path with scheme, reserved path).
+   */
+  @Test
+  public void testBadPathsInConfig() {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(
+        CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES,
+        "hdfs://foo/,/.reserved/foo");
+    Collection<String> paths = FSDirectory.parseProtectedDirectories(conf);
+    assertThat("Unexpected directories " + paths,
+        paths.size(), is(0));
+  }
+
+  /**
+   * Return true if the path was successfully deleted. False if it
+   * failed with AccessControlException. Any other exceptions are
+   * propagated to the caller.
+   *
+   * @param fs the file system to delete from.
+   * @param path the path to delete recursively.
+   * @return true if the delete succeeded, false if it was blocked.
+   * @throws IOException on any failure other than AccessControlException.
+   */
+  private boolean deletePath(FileSystem fs, Path path) throws IOException {
+    try {
+      fs.delete(path, true);
+      return true;
+    } catch (AccessControlException ace) {
+      return false;
+    }
+  }
+
+  private static class TestMatrixEntry {
+    // true if the path can be deleted.
+    final Map<Path, Boolean> protectedPaths = Maps.newHashMap();
+    final Map<Path, Boolean> unProtectedPaths = Maps.newHashMap();
+
+    private TestMatrixEntry() {
+    }
+
+    public static TestMatrixEntry get() {
+      return new TestMatrixEntry();
+    }
+
+    public Collection<Path> getProtectedPaths() {
+      return protectedPaths.keySet();
+    }
+
+    public Collection<Path> getUnprotectedPaths() {
+      return unProtectedPaths.keySet();
+    }
+
+    /**
+     * Get all paths to be deleted in sorted order.
+     * @return sorted collection of paths to be deleted.
+     */
+    @SuppressWarnings("unchecked") // Path implements Comparable incorrectly
+    public Iterable<Path> getAllPathsToBeDeleted() {
+      // Sorting ensures deletion of parents is attempted first.
+      ArrayList<Path> combined = new ArrayList<>();
+      combined.addAll(protectedPaths.keySet());
+      combined.addAll(unProtectedPaths.keySet());
+      Collections.sort(combined);
+      return combined;
+    }
+
+    public boolean canPathBeDeleted(Path path) {
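+      // If a path was registered in both maps, the protected entry wins.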
+      return protectedPaths.containsKey(path) ?
+          protectedPaths.get(path) : unProtectedPaths.get(path);
+    }
+
+
+    public TestMatrixEntry addProtectedDir(String dir, boolean canBeDeleted) {
+      protectedPaths.put(new Path(dir), canBeDeleted);
+      return this;
+    }
+
+    public TestMatrixEntry addUnprotectedDir(String dir, boolean canBeDeleted) {
+      unProtectedPaths.put(new Path(dir), canBeDeleted);
+      return this;
+    }
+
+    @Override
+    public String toString() {
+      return "TestMatrixEntry - ProtectedPaths=[" +
+          Joiner.on(", ").join(protectedPaths.keySet()) +
+          "]; UnprotectedPaths=[" +
+          Joiner.on(", ").join(unProtectedPaths.keySet()) + "]";
+    }
+  }
+}
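For context, here is a minimal sketch of how the setting exercised by this test might be used against a single-node cluster. It is not part of the patch; the /critical directory name is made up, and the cluster wiring and imports mirror setupTestCase() and deletePath() above.

    Configuration conf = new HdfsConfiguration();
    // Hypothetical protected directory; must be configured before the NameNode starts.
    conf.set(CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES, "/critical");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/critical/data"));
      try {
        fs.delete(new Path("/critical"), true);     // non-empty protected directory
      } catch (AccessControlException ace) {
        // Expected: deleting a non-empty protected directory is refused.
      }
      fs.delete(new Path("/critical/data"), true);  // the unprotected child deletes normally
    } finally {
      cluster.shutdown();
    }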

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java

@@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.ClientContext;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -367,7 +367,7 @@ public class TestShortCircuitLocalRead {
       Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
       final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
       ClientDatanodeProtocol proxy =
-          DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
+          DFSUtilClient.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
       try {
         proxy.getBlockLocalPathInfo(blk, token);
         Assert.fail("The call should have failed as this user "

+ 216 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java

@@ -0,0 +1,216 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.web.oauth2.ConfCredentialBasedAccessTokenProvider;
+import org.apache.hadoop.hdfs.web.oauth2.CredentialBasedAccessTokenProvider;
+import org.apache.hadoop.hdfs.web.oauth2.OAuth2ConnectionConfigurator;
+import org.apache.http.HttpStatus;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockserver.client.server.MockServerClient;
+import org.mockserver.integration.ClientAndServer;
+import org.mockserver.model.Header;
+import org.mockserver.model.HttpRequest;
+import org.mockserver.model.HttpResponse;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.ACCESS_TOKEN_PROVIDER_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.TOKEN_TYPE;
+import static org.junit.Assert.assertEquals;
+import static org.mockserver.integration.ClientAndServer.startClientAndServer;
+import static org.mockserver.matchers.Times.exactly;
+import static org.mockserver.model.HttpRequest.request;
+import static org.mockserver.model.HttpResponse.response;
+
+public class TestWebHDFSOAuth2 {
+  public static final Log LOG = LogFactory.getLog(TestWebHDFSOAuth2.class);
+
+  private ClientAndServer mockWebHDFS;
+  private ClientAndServer mockOAuthServer;
+
+  public final static int WEBHDFS_PORT = 7552;
+  public final static int OAUTH_PORT = 7553;
+
+  public final static Header CONTENT_TYPE_APPLICATION_JSON = new Header("Content-Type", "application/json");
+
+  public final static String AUTH_TOKEN = "0123456789abcdef";
+  public final static Header AUTH_TOKEN_HEADER = new Header("AUTHORIZATION", OAuth2ConnectionConfigurator.HEADER + AUTH_TOKEN);
+
+  @Before
+  public void startMockOAuthServer() {
+    mockOAuthServer = startClientAndServer(OAUTH_PORT);
+  }
+
+  @Before
+  public void startMockWebHDFSServer() {
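+    // Point hadoop.home.dir at an existing directory so Hadoop's Shell
+    // utilities do not complain about a missing Hadoop home during FS init.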
+    System.setProperty("hadoop.home.dir", System.getProperty("user.dir"));
+
+    mockWebHDFS = startClientAndServer(WEBHDFS_PORT);
+  }
+
+  @Test
+  public void listStatusReturnsAsExpected() throws URISyntaxException, IOException {
+    MockServerClient mockWebHDFSServerClient = new MockServerClient("localhost", WEBHDFS_PORT);
+    MockServerClient mockOAuthServerClient = new MockServerClient("localhost", OAUTH_PORT);
+
+    HttpRequest oauthServerRequest = getOAuthServerMockRequest(mockOAuthServerClient);
+
+    HttpRequest fileSystemRequest = request()
+        .withMethod("GET")
+        .withPath(WebHdfsFileSystem.PATH_PREFIX + "/test1/test2")
+        .withHeader(AUTH_TOKEN_HEADER);
+
+    try {
+      mockWebHDFSServerClient.when(fileSystemRequest,
+          exactly(1)
+      )
+          .respond(
+              response()
+                  .withStatusCode(HttpStatus.SC_OK)
+                  .withHeaders(
+                      CONTENT_TYPE_APPLICATION_JSON
+                  )
+                  .withBody("{\n" +
+                      "  \"FileStatuses\":\n" +
+                      "  {\n" +
+                      "    \"FileStatus\":\n" +
+                      "    [\n" +
+                      "      {\n" +
+                      "        \"accessTime\"      : 1320171722771,\n" +
+                      "        \"blockSize\"       : 33554432,\n" +
+                      "        \"group\"           : \"supergroup\",\n" +
+                      "        \"length\"          : 24930,\n" +
+                      "        \"modificationTime\": 1320171722771,\n" +
+                      "        \"owner\"           : \"webuser\",\n" +
+                      "        \"pathSuffix\"      : \"a.patch\",\n" +
+                      "        \"permission\"      : \"644\",\n" +
+                      "        \"replication\"     : 1,\n" +
+                      "        \"type\"            : \"FILE\"\n" +
+                      "      },\n" +
+                      "      {\n" +
+                      "        \"accessTime\"      : 0,\n" +
+                      "        \"blockSize\"       : 0,\n" +
+                      "        \"group\"           : \"supergroup\",\n" +
+                      "        \"length\"          : 0,\n" +
+                      "        \"modificationTime\": 1320895981256,\n" +
+                      "        \"owner\"           : \"szetszwo\",\n" +
+                      "        \"pathSuffix\"      : \"bar\",\n" +
+                      "        \"permission\"      : \"711\",\n" +
+                      "        \"replication\"     : 0,\n" +
+                      "        \"type\"            : \"DIRECTORY\"\n" +
+                      "      }\n" +
+                      "    ]\n" +
+                      "  }\n" +
+                      "}\n")
+          );
+
+      FileSystem fs = new WebHdfsFileSystem();
+      Configuration conf = getConfiguration();
+      conf.set(OAUTH_REFRESH_URL_KEY, "http://localhost:" + OAUTH_PORT + "/refresh");
+      conf.set(CredentialBasedAccessTokenProvider.OAUTH_CREDENTIAL_KEY, "credential");
+
+      URI uri = new URI("webhdfs://localhost:" + WEBHDFS_PORT);
+      fs.initialize(uri, conf);
+
+      FileStatus[] ls = fs.listStatus(new Path("/test1/test2"));
+
+      mockOAuthServer.verify(oauthServerRequest);
+      mockWebHDFSServerClient.verify(fileSystemRequest);
+
+      assertEquals(2, ls.length);
+      assertEquals("a.patch", ls[0].getPath().getName());
+      assertEquals("bar", ls[1].getPath().getName());
+
+      fs.close();
+    } finally {
+      mockWebHDFSServerClient.clear(fileSystemRequest);
+      mockOAuthServerClient.clear(oauthServerRequest);
+    }
+  }
+
+  private HttpRequest getOAuthServerMockRequest(MockServerClient mockServerClient) throws IOException {
+    HttpRequest expectedRequest = request()
+        .withMethod("POST")
+        .withPath("/refresh")
+        .withBody("client_secret=credential&grant_type=client_credentials&client_id=MY_CLIENTID");
+    
+    Map<String, Object> map = new TreeMap<>();
+    
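+    // Access token response fields, per RFC 6749 section 5.1.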
+    map.put(EXPIRES_IN, "0987654321");
+    map.put(TOKEN_TYPE, "bearer");
+    map.put(ACCESS_TOKEN, AUTH_TOKEN);
+
+    ObjectMapper mapper = new ObjectMapper();
+    
+    HttpResponse resp = response()
+        .withStatusCode(HttpStatus.SC_OK)
+        .withHeaders(
+            CONTENT_TYPE_APPLICATION_JSON
+        )
+        .withBody(mapper.writeValueAsString(map));
+
+    mockServerClient
+        .when(expectedRequest, exactly(1))
+        .respond(resp);
+
+    return expectedRequest;
+  }
+
+  public Configuration getConfiguration() {
+    Configuration conf = new Configuration();
+
+    // Configs for OAuth2
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_KEY, true);
+    conf.set(OAUTH_CLIENT_ID_KEY, "MY_CLIENTID");
+
+    conf.set(ACCESS_TOKEN_PROVIDER_KEY,
+        ConfCredentialBasedAccessTokenProvider.class.getName());
+
+    return conf;
+  }
+
+  @After
+  public void stopMockWebHDFSServer() {
+    mockWebHDFS.stop();
+  }
+
+  @After
+  public void stopMockOAuthServer() {
+    mockOAuthServer.stop();
+  }
+}

+ 63 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java

@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.util.Timer;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class TestAccessTokenTimer {
+  @Test
+  public void expireConversionWorks() {
+    Timer mockTimer = mock(Timer.class);
+    when(mockTimer.now())
+        .thenReturn(5L);
+    
+    AccessTokenTimer timer = new AccessTokenTimer(mockTimer);
+    
+    timer.setExpiresIn("3");
+    assertEquals(3005, timer.getNextRefreshMSSinceEpoch());
+    
+    assertTrue(timer.shouldRefresh());
+  }
+  
+  @Test
+  public void shouldRefreshIsCorrect() {
+    Timer mockTimer = mock(Timer.class);
+    when(mockTimer.now())
+        .thenReturn(500L)
+        .thenReturn(1000000L + 500L);
+    
+    AccessTokenTimer timer = new AccessTokenTimer(mockTimer);
+    
+    timer.setExpiresInMSSinceEpoch("1000000");
+    
+    assertFalse(timer.shouldRefresh());
+    assertTrue(timer.shouldRefresh());
+    
+    verify(mockTimer, times(2)).now();
+  } 
+}

+ 138 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java

@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Timer;
+import org.apache.http.HttpStatus;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.Test;
+import org.mockserver.client.server.MockServerClient;
+import org.mockserver.integration.ClientAndServer;
+import org.mockserver.model.Header;
+import org.mockserver.model.HttpRequest;
+import org.mockserver.model.HttpResponse;
+import org.mockserver.model.Parameter;
+import org.mockserver.model.ParameterBody;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.ACCESS_TOKEN_PROVIDER_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_CREDENTIALS;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_ID;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_SECRET;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.GRANT_TYPE;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.TOKEN_TYPE;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.mockserver.integration.ClientAndServer.startClientAndServer;
+import static org.mockserver.matchers.Times.exactly;
+import static org.mockserver.model.HttpRequest.request;
+import static org.mockserver.model.HttpResponse.response;
+
+public class TestClientCredentialTimeBasedTokenRefresher {
+  public final static Header CONTENT_TYPE_APPLICATION_JSON
+      = new Header("Content-Type", "application/json");
+
+  public final static String CLIENT_ID_FOR_TESTING = "joebob";
+
+  public Configuration buildConf(String credential, String tokenExpires,
+                                 String clientId, String refreshURL) {
+    // Configurations are simple enough that it's not worth mocking them out.
+    Configuration conf = new Configuration();
+    conf.set(CredentialBasedAccessTokenProvider.OAUTH_CREDENTIAL_KEY,
+        credential);
+    conf.set(ACCESS_TOKEN_PROVIDER_KEY,
+        ConfCredentialBasedAccessTokenProvider.class.getName());
+    conf.set(OAUTH_CLIENT_ID_KEY, clientId);
+    conf.set(OAUTH_REFRESH_URL_KEY, refreshURL);
+    return conf;
+  }
+
+  @Test
+  public void refreshUrlIsCorrect() throws IOException {
+    final int PORT = 7552;
+    final String REFRESH_ADDRESS = "http://localhost:" + PORT + "/refresh";
+
+    long tokenExpires = 0;
+
+    Configuration conf = buildConf("myreallycoolcredential",
+        Long.toString(tokenExpires),
+        CLIENT_ID_FOR_TESTING,
+        REFRESH_ADDRESS);
+
+    Timer mockTimer = mock(Timer.class);
+    when(mockTimer.now()).thenReturn(tokenExpires + 1000L);
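+    // The mocked clock is already past the (zero) token expiry, so the first
+    // getAccessToken() call below must go to the refresh URL.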
+
+    AccessTokenProvider credProvider =
+        new ConfCredentialBasedAccessTokenProvider(mockTimer);
+    credProvider.setConf(conf);
+    
+    // Build mock server to receive refresh request
+    ClientAndServer mockServer  = startClientAndServer(PORT);
+
+    HttpRequest expectedRequest = request()
+        .withMethod("POST")
+        .withPath("/refresh")
+        .withBody( 
+        // Note, OkHttp does not sort the param values, so we need to do
+        // it ourselves via the ordering provided to ParameterBody...
+            ParameterBody.params(
+                Parameter.param(CLIENT_SECRET, "myreallycoolcredential"),
+                Parameter.param(GRANT_TYPE, CLIENT_CREDENTIALS),
+                Parameter.param(CLIENT_ID, CLIENT_ID_FOR_TESTING)
+                ));
+
+    MockServerClient mockServerClient = new MockServerClient("localhost", PORT);
+
+    // https://tools.ietf.org/html/rfc6749#section-5.1
+    Map<String, Object> map = new TreeMap<>();
+    
+    map.put(EXPIRES_IN, "0987654321");
+    map.put(TOKEN_TYPE, "bearer");
+    map.put(ACCESS_TOKEN, "new access token");
+
+    ObjectMapper mapper = new ObjectMapper();
+    
+    HttpResponse resp = response()
+        .withStatusCode(HttpStatus.SC_OK)
+        .withHeaders(
+            CONTENT_TYPE_APPLICATION_JSON
+        )
+        .withBody(mapper.writeValueAsString(map));
+
+    mockServerClient
+        .when(expectedRequest, exactly(1))
+        .respond(resp);
+
+    assertEquals("new access token", credProvider.getAccessToken());
+
+    mockServerClient.verify(expectedRequest);
+
+    mockServerClient.clear(expectedRequest);
+    mockServer.stop();
+  }
+}

+ 138 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java

@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Timer;
+import org.apache.http.HttpStatus;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.Test;
+import org.mockserver.client.server.MockServerClient;
+import org.mockserver.integration.ClientAndServer;
+import org.mockserver.model.Header;
+import org.mockserver.model.HttpRequest;
+import org.mockserver.model.HttpResponse;
+import org.mockserver.model.Parameter;
+import org.mockserver.model.ParameterBody;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.ConfRefreshTokenBasedAccessTokenProvider.OAUTH_REFRESH_TOKEN_EXPIRES_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.ConfRefreshTokenBasedAccessTokenProvider.OAUTH_REFRESH_TOKEN_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.BEARER;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_ID;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.GRANT_TYPE;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.REFRESH_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.TOKEN_TYPE;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.mockserver.integration.ClientAndServer.startClientAndServer;
+import static org.mockserver.matchers.Times.exactly;
+import static org.mockserver.model.HttpRequest.request;
+import static org.mockserver.model.HttpResponse.response;
+
+public class TestRefreshTokenTimeBasedTokenRefresher {
+
+  public final static Header CONTENT_TYPE_APPLICATION_JSON
+      = new Header("Content-Type", "application/json");
+
+  public Configuration buildConf(String refreshToken, String tokenExpires,
+                                 String clientId, String refreshURL) {
+    // Configurations are simple enough that it's not worth mocking them out.
+    Configuration conf = new Configuration();
+    conf.set(OAUTH_REFRESH_TOKEN_KEY, refreshToken);
+    conf.set(OAUTH_REFRESH_TOKEN_EXPIRES_KEY, tokenExpires);
+    conf.set(OAUTH_CLIENT_ID_KEY, clientId);
+    conf.set(OAUTH_REFRESH_URL_KEY, refreshURL);
+
+    return conf;
+  }
+
+  @Test
+  public void refreshUrlIsCorrect() throws IOException {
+    final int PORT = 7552;
+    final String REFRESH_ADDRESS = "http://localhost:" + PORT + "/refresh";
+
+    long tokenExpires = 0;
+
+    Configuration conf = buildConf("refresh token key",
+        Long.toString(tokenExpires),
+        "joebob",
+        REFRESH_ADDRESS);
+
+    Timer mockTimer = mock(Timer.class);
+    when(mockTimer.now()).thenReturn(tokenExpires + 1000L);
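+    // now() is already past the configured expiry, so getAccessToken() must refresh.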
+
+    AccessTokenProvider tokenProvider =
+        new ConfRefreshTokenBasedAccessTokenProvider(mockTimer);
+    tokenProvider.setConf(conf);
+
+    // Build mock server to receive refresh request
+
+    ClientAndServer mockServer  = startClientAndServer(PORT);
+
+    HttpRequest expectedRequest = request()
+        .withMethod("POST")
+        .withPath("/refresh")
+        // Note, OkHttp does not sort the param values, so we need to
+        // do it ourselves via the ordering provided to ParameterBody...
+        .withBody(
+            ParameterBody.params(
+                Parameter.param(CLIENT_ID, "joebob"),
+                Parameter.param(GRANT_TYPE, REFRESH_TOKEN),
+                Parameter.param(REFRESH_TOKEN, "refresh token key")));
+
+    MockServerClient mockServerClient = new MockServerClient("localhost", PORT);
+
+    // https://tools.ietf.org/html/rfc6749#section-5.1
+    Map<String, Object> map = new TreeMap<>();
+
+    map.put(EXPIRES_IN, "0987654321");
+    map.put(TOKEN_TYPE, BEARER);
+    map.put(ACCESS_TOKEN, "new access token");
+
+    ObjectMapper mapper = new ObjectMapper();
+    
+    HttpResponse resp = response()
+        .withStatusCode(HttpStatus.SC_OK)
+        .withHeaders(
+            CONTENT_TYPE_APPLICATION_JSON
+        )
+        .withBody(mapper.writeValueAsString(map));
+
+    mockServerClient
+        .when(expectedRequest, exactly(1))
+        .respond(resp);
+
+    assertEquals("new access token", tokenProvider.getAccessToken());
+
+    mockServerClient.verify(expectedRequest);
+
+    mockServerClient.clear(expectedRequest);
+    mockServer.stop();
+  }
+
+}

+ 1 - 0
hadoop-project/src/site/site.xml

@@ -121,6 +121,7 @@
       <item name="Fair Scheduler" href="hadoop-yarn/hadoop-yarn-site/FairScheduler.html"/>
       <item name="Fair Scheduler" href="hadoop-yarn/hadoop-yarn-site/FairScheduler.html"/>
       <item name="ResourceManager Restart" href="hadoop-yarn/hadoop-yarn-site/ResourceManagerRestart.html"/>
       <item name="ResourceManager Restart" href="hadoop-yarn/hadoop-yarn-site/ResourceManagerRestart.html"/>
       <item name="ResourceManager HA" href="hadoop-yarn/hadoop-yarn-site/ResourceManagerHA.html"/>
       <item name="ResourceManager HA" href="hadoop-yarn/hadoop-yarn-site/ResourceManagerHA.html"/>
+      <item name="Node Labels" href="hadoop-yarn/hadoop-yarn-site/NodeLabel.html"/>
       <item name="Web Application Proxy" href="hadoop-yarn/hadoop-yarn-site/WebApplicationProxy.html"/>
       <item name="Web Application Proxy" href="hadoop-yarn/hadoop-yarn-site/WebApplicationProxy.html"/>
       <item name="Timeline Server" href="hadoop-yarn/hadoop-yarn-site/TimelineServer.html"/>
       <item name="Timeline Server" href="hadoop-yarn/hadoop-yarn-site/TimelineServer.html"/>
       <item name="Writing YARN Applications" href="hadoop-yarn/hadoop-yarn-site/WritingYarnApplications.html"/>
       <item name="Writing YARN Applications" href="hadoop-yarn/hadoop-yarn-site/WritingYarnApplications.html"/>

Some files were not shown because too many files changed in this diff