
Merge remote-tracking branch 'origin/trunk' into HDFS-8966

Jing Zhao 9 years ago
parent
commit
27009cf32a
100 changed files with 3049 additions and 390 deletions
  1. 3 1
      hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
  2. 3 1
      hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml
  3. 3 1
      hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
  4. 3 1
      hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml
  5. 2 2
      hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml
  6. 3 1
      hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml
  7. 3 1
      hadoop-assemblies/src/main/resources/assemblies/hadoop-sls.xml
  8. 2 2
      hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
  9. 2 2
      hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
  10. 2 2
      hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
  11. 31 1
      hadoop-common-project/hadoop-common/CHANGES.txt
  12. 0 1
      hadoop-common-project/hadoop-common/pom.xml
  13. 10 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  14. 7 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
  15. 6 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
  16. 91 16
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
  17. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureDecoder.java
  18. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureEncoder.java
  19. 43 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/CoderOption.java
  20. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawDecoder.java
  21. 39 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawEncoder.java
  22. 14 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
  23. 17 15
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureDecoder.java
  24. 19 16
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureEncoder.java
  25. 0 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawDecoder.java
  26. 0 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawEncoder.java
  27. 42 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
  28. 8 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
  29. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
  30. 1 0
      hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
  31. 30 4
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
  32. 31 6
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestRawCoderBase.java
  33. 1 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoder.java
  34. 25 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
  35. 17 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
  36. 3 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
  37. 9 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
  38. 21 0
      hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
  39. 0 1
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
  40. 18 3
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
  41. 38 40
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java
  42. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java
  43. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestXAttr.java
  44. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
  45. 5 7
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
  46. 12 13
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
  47. 5 7
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
  48. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
  49. 9 9
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java
  50. 18 19
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
  51. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java
  52. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
  53. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java
  54. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
  55. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
  56. 4 3
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
  57. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java
  58. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
  59. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
  60. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
  61. 65 4
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  62. 0 6
      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  63. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  64. 5 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
  65. 32 14
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
  66. 70 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
  67. 15 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
  68. 1 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
  69. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
  70. 10 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  71. 25 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
  72. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
  73. 3 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
  74. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  75. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
  76. 1657 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
  77. 29 91
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
  78. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
  79. 15 18
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
  80. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure020.java
  81. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure030.java
  82. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure040.java
  83. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure050.java
  84. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure060.java
  85. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure070.java
  86. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure080.java
  87. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure090.java
  88. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure100.java
  89. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure110.java
  90. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure120.java
  91. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure130.java
  92. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure140.java
  93. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure150.java
  94. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure160.java
  95. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure170.java
  96. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure180.java
  97. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure190.java
  98. 22 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure200.java
  99. 23 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure210.java
  100. 28 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

+ 3 - 1
hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml

@@ -14,7 +14,9 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-distro</id>
   <formats>
     <format>dir</format>

+ 3 - 1
hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml

@@ -12,7 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-hdfs-nfs-dist</id>
   <formats>
     <format>dir</format>

+ 3 - 1
hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml

@@ -12,7 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-httpfs-dist</id>
   <formats>
     <format>dir</format>

+ 3 - 1
hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml

@@ -12,7 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-kms-dist</id>
   <formats>
     <format>dir</format>

+ 2 - 2
hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml

@@ -14,9 +14,9 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-mapreduce-dist</id>
   <formats>
     <format>dir</format>

+ 3 - 1
hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml

@@ -12,7 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-nfs-dist</id>
   <formats>
     <format>dir</format>

+ 3 - 1
hadoop-assemblies/src/main/resources/assemblies/hadoop-sls.xml

@@ -15,7 +15,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-sls</id>
   <formats>
     <format>dir</format>

+ 2 - 2
hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml

@@ -14,9 +14,9 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-src</id>
   <formats>
     <format>tar.gz</format>

+ 2 - 2
hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml

@@ -14,9 +14,9 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-tools</id>
   <formats>
     <format>dir</format>

+ 2 - 2
hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml

@@ -14,9 +14,9 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-yarn-dist</id>
   <formats>
     <format>dir</format>

+ 31 - 1
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -249,6 +249,10 @@ Trunk (Unreleased)
     HADOOP-12385. Include nested stack trace in SaslRpcClient.getServerToken()
     (stevel)
 
+    HADOOP-12133. Add schemas to Maven Assembly XMLs (Gábor Lipták via aw)
+
+    HADOOP-12541. make re2j dependency consistent (Matthew Paduano via aw)
+
   BUG FIXES
 
     HADOOP-11473. test-patch says "-1 overall" even when all checks are +1
@@ -603,6 +607,12 @@ Trunk (Unreleased)
 
       HADOOP-11921. Enhance tests for erasure coders. (Kai Zheng)
 
+      HADOOP-12327. Initialize output buffers with ZERO bytes in erasure coder.
+      (Kai Zheng via waltersu4549)
+
+      HADOOP-12047. Indicate preference not to affect input buffers during
+      coding in erasure coder. (Kai Zheng via waltersu4549)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -1296,6 +1306,14 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12519. hadoop-azure tests should avoid creating a metrics
     configuration file in the module root directory. (cnauroth)
 
+    HADOOP-12533. Introduce FileNotFoundException in WASB for read and seek API.
+    (Dushyanth via cnauroth)
+
+    HADOOP-12508. delete fails with exception when lease is held on blob.
+    (Gaurav Kanade via cnauroth)
+
+    HADOOP-12542. TestDNS fails on Windows after HADOOP-12437. (cnauroth)
+
   OPTIMIZATIONS
 
     HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
@@ -2153,7 +2171,19 @@ Release 2.7.0 - 2015-04-20
     HADOOP-11837. AuthenticationFilter should destroy SignerSecretProvider in
     Tomcat deployments. (Bowen Zhang via wheat9)
 
-Release 2.6.2 - UNRELEASED
+Release 2.6.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES
 

+ 0 - 1
hadoop-common-project/hadoop-common/pom.xml

@@ -194,7 +194,6 @@
     <dependency>
       <groupId>com.google.re2j</groupId>
       <artifactId>re2j</artifactId>
-      <version>${re2j.version}</version>
       <scope>compile</scope>
     </dependency>
     <dependency>

+ 10 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -2079,16 +2079,17 @@ public abstract class FileSystem extends Configured implements Closeable {
     CACHE.remove(this.key, this);
   }
 
-  /** Return the total size of all files in the filesystem.*/
-  public long getUsed() throws IOException{
-    long used = 0;
-    RemoteIterator<LocatedFileStatus> files = listFiles(new Path("/"), true);
-    while (files.hasNext()) {
-      used += files.next().getLen();
-    }
-    return used;
+  /** Return the total size of all files in the filesystem. */
+  public long getUsed() throws IOException {
+    Path path = new Path("/");
+    return getUsed(path);
   }
-  
+
+  /** Return the total size of all files from a specified path. */
+  public long getUsed(Path path) throws IOException {
+    return getContentSummary(path).getLength();
+  }
+
   /**
    * Get the block size for a particular file.
    * @param f the filename

+ 7 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -389,7 +389,13 @@ public class FilterFileSystem extends FileSystem {
   public long getUsed() throws IOException{
     return fs.getUsed();
   }
-  
+
+  /** Return the total size of all files from a specified path.*/
+  @Override
+  public long getUsed(Path path) throws IOException {
+    return fs.getUsed(path);
+  }
+
   @Override
   public long getDefaultBlockSize() {
     return fs.getDefaultBlockSize();

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -1237,6 +1237,12 @@ public class HarFileSystem extends FileSystem {
     return fs.getUsed();
   }
 
+  /** Return the total size of all files from a specified path.*/
+  @Override
+  public long getUsed(Path path) throws IOException {
+    return fs.getUsed(path);
+  }
+
   @SuppressWarnings("deprecation")
   @Override
   public long getDefaultBlockSize() {
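
The three hunks above add a path-scoped overload, getUsed(Path), to FileSystem (backed by getContentSummary) and delegate it in FilterFileSystem and HarFileSystem. A minimal caller-side sketch, assuming a hypothetical HDFS URI and directory path:

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class UsedSpaceExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // The URI and path below are illustrative only.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf)) {
      // Space consumed under one directory, via getContentSummary(path).getLength().
      long underDir = fs.getUsed(new Path("/user/example"));
      // Whole-filesystem total; equivalent to getUsed(new Path("/")).
      long total = fs.getUsed();
      System.out.println(underDir + " of " + total + " bytes used");
    }
  }
}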

+ 91 - 16
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java

@@ -22,6 +22,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configured;
 
 import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
 
 /**
  * A common class of basic facilities to be shared by encoder and decoder
@@ -32,14 +34,60 @@ import java.nio.ByteBuffer;
 public abstract class AbstractRawErasureCoder
     extends Configured implements RawErasureCoder {
 
+  private static byte[] emptyChunk = new byte[4096];
   private final int numDataUnits;
   private final int numParityUnits;
   private final int numAllUnits;
+  private final Map<CoderOption, Object> coderOptions;
 
   public AbstractRawErasureCoder(int numDataUnits, int numParityUnits) {
     this.numDataUnits = numDataUnits;
     this.numParityUnits = numParityUnits;
     this.numAllUnits = numDataUnits + numParityUnits;
+    this.coderOptions = new HashMap<>(3);
+
+    coderOptions.put(CoderOption.PREFER_DIRECT_BUFFER, preferDirectBuffer());
+    coderOptions.put(CoderOption.ALLOW_CHANGE_INPUTS, false);
+    coderOptions.put(CoderOption.ALLOW_VERBOSE_DUMP, false);
+  }
+
+  @Override
+  public Object getCoderOption(CoderOption option) {
+    if (option == null) {
+      throw new HadoopIllegalArgumentException("Invalid option");
+    }
+    return coderOptions.get(option);
+  }
+
+  @Override
+  public void setCoderOption(CoderOption option, Object value) {
+    if (option == null || value == null) {
+      throw new HadoopIllegalArgumentException(
+          "Invalid option or option value");
+    }
+    if (option.isReadOnly()) {
+      throw new HadoopIllegalArgumentException(
+          "The option is read-only: " + option.name());
+    }
+
+    coderOptions.put(option, value);
+  }
+
+  /**
+   * Make sure to return an empty chunk buffer for the desired length.
+   * @param leastLength
+   * @return empty chunk of zero bytes
+   */
+  protected static byte[] getEmptyChunk(int leastLength) {
+    if (emptyChunk.length >= leastLength) {
+      return emptyChunk; // In most time
+    }
+
+    synchronized (AbstractRawErasureCoder.class) {
+      emptyChunk = new byte[leastLength];
+    }
+
+    return emptyChunk;
   }
 
   @Override
@@ -57,13 +105,35 @@ public abstract class AbstractRawErasureCoder
   }
 
   @Override
-  public boolean preferDirectBuffer() {
+  public void release() {
+    // Nothing to do by default
+  }
+
+  /**
+   * Tell if direct buffer is preferred or not. It's for callers to
+   * decide how to allocate coding chunk buffers, using DirectByteBuffer or
+   * bytes array. It will return false by default.
+   * @return true if native buffer is preferred for performance consideration,
+   * otherwise false.
+   */
+  protected boolean preferDirectBuffer() {
     return false;
   }
 
-  @Override
-  public void release() {
-    // Nothing to do by default
+  protected boolean isAllowingChangeInputs() {
+    Object value = getCoderOption(CoderOption.ALLOW_CHANGE_INPUTS);
+    if (value != null && value instanceof Boolean) {
+      return (boolean) value;
+    }
+    return false;
+  }
+
+  protected boolean isAllowingVerboseDump() {
+    Object value = getCoderOption(CoderOption.ALLOW_VERBOSE_DUMP);
+    if (value != null && value instanceof Boolean) {
+      return (boolean) value;
+    }
+    return false;
   }
 
   /**
@@ -73,11 +143,9 @@ public abstract class AbstractRawErasureCoder
    * @return the buffer itself, with ZERO bytes written, the position and limit
    *         are not changed after the call
    */
-  protected ByteBuffer resetBuffer(ByteBuffer buffer) {
+  protected ByteBuffer resetBuffer(ByteBuffer buffer, int len) {
     int pos = buffer.position();
-    for (int i = pos; i < buffer.limit(); ++i) {
-      buffer.put((byte) 0);
-    }
+    buffer.put(getEmptyChunk(len), 0, len);
     buffer.position(pos);
 
     return buffer;
@@ -90,9 +158,8 @@ public abstract class AbstractRawErasureCoder
    * @return the buffer itself
    */
   protected byte[] resetBuffer(byte[] buffer, int offset, int len) {
-    for (int i = offset; i < len; ++i) {
-      buffer[i] = (byte) 0;
-    }
+    byte[] empty = getEmptyChunk(len);
+    System.arraycopy(empty, 0, buffer, offset, len);
 
     return buffer;
   }
@@ -104,9 +171,10 @@ public abstract class AbstractRawErasureCoder
    * @param allowNull whether to allow any element to be null or not
    * @param dataLen the length of data available in the buffer to ensure with
    * @param isDirectBuffer is direct buffer or not to ensure with
+   * @param isOutputs is output buffer or not
    */
-  protected void ensureLengthAndType(ByteBuffer[] buffers, boolean allowNull,
-                                     int dataLen, boolean isDirectBuffer) {
+  protected void checkParameterBuffers(ByteBuffer[] buffers, boolean
+      allowNull, int dataLen, boolean isDirectBuffer, boolean isOutputs) {
     for (ByteBuffer buffer : buffers) {
       if (buffer == null && !allowNull) {
         throw new HadoopIllegalArgumentException(
@@ -120,18 +188,23 @@ public abstract class AbstractRawErasureCoder
           throw new HadoopIllegalArgumentException(
               "Invalid buffer, isDirect should be " + isDirectBuffer);
         }
+        if (isOutputs) {
+          resetBuffer(buffer, dataLen);
+        }
       }
     }
   }
 
   /**
-   * Check and ensure the buffers are of the length specified by dataLen.
+   * Check and ensure the buffers are of the length specified by dataLen. If is
+   * output buffers, ensure they will be ZEROed.
    * @param buffers the buffers to check
    * @param allowNull whether to allow any element to be null or not
    * @param dataLen the length of data available in the buffer to ensure with
+   * @param isOutputs is output buffer or not
    */
-  protected void ensureLength(byte[][] buffers,
-                              boolean allowNull, int dataLen) {
+  protected void checkParameterBuffers(byte[][] buffers, boolean allowNull,
+                                       int dataLen, boolean isOutputs) {
     for (byte[] buffer : buffers) {
       if (buffer == null && !allowNull) {
         throw new HadoopIllegalArgumentException(
@@ -139,6 +212,8 @@ public abstract class AbstractRawErasureCoder
       } else if (buffer != null && buffer.length != dataLen) {
         throw new HadoopIllegalArgumentException(
             "Invalid buffer not of length " + dataLen);
+      } else if (isOutputs) {
+        resetBuffer(buffer, 0, dataLen);
       }
     }
   }

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureDecoder.java

@@ -48,8 +48,8 @@ public abstract class AbstractRawErasureDecoder extends AbstractRawErasureCoder
     if (dataLen == 0) {
       return;
     }
-    ensureLengthAndType(inputs, true, dataLen, usingDirectBuffer);
-    ensureLengthAndType(outputs, false, dataLen, usingDirectBuffer);
+    checkParameterBuffers(inputs, true, dataLen, usingDirectBuffer, false);
+    checkParameterBuffers(outputs, false, dataLen, usingDirectBuffer, true);
 
     if (usingDirectBuffer) {
       doDecode(inputs, erasedIndexes, outputs);
@@ -106,8 +106,8 @@ public abstract class AbstractRawErasureDecoder extends AbstractRawErasureCoder
     if (dataLen == 0) {
       return;
     }
-    ensureLength(inputs, true, dataLen);
-    ensureLength(outputs, false, dataLen);
+    checkParameterBuffers(inputs, true, dataLen, false);
+    checkParameterBuffers(outputs, false, dataLen, true);
 
     int[] inputOffsets = new int[inputs.length]; // ALL ZERO
     int[] outputOffsets = new int[outputs.length]; // ALL ZERO

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureEncoder.java

@@ -45,8 +45,8 @@ public abstract class AbstractRawErasureEncoder extends AbstractRawErasureCoder
     if (dataLen == 0) {
       return;
     }
-    ensureLengthAndType(inputs, false, dataLen, usingDirectBuffer);
-    ensureLengthAndType(outputs, false, dataLen, usingDirectBuffer);
+    checkParameterBuffers(inputs, false, dataLen, usingDirectBuffer, false);
+    checkParameterBuffers(outputs, false, dataLen, usingDirectBuffer, true);
 
     if (usingDirectBuffer) {
       doEncode(inputs, outputs);
@@ -93,8 +93,8 @@ public abstract class AbstractRawErasureEncoder extends AbstractRawErasureCoder
     if (dataLen == 0) {
       return;
     }
-    ensureLength(inputs, false, dataLen);
-    ensureLength(outputs, false, dataLen);
+    checkParameterBuffers(inputs, false, dataLen, false);
+    checkParameterBuffers(outputs, false, dataLen, true);
 
     int[] inputOffsets = new int[inputs.length]; // ALL ZERO
     int[] outputOffsets = new int[outputs.length]; // ALL ZERO

+ 43 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/CoderOption.java

@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+/**
+ * Supported erasure coder options.
+ */
+public enum CoderOption {
+  /* If direct buffer is preferred, for perf consideration */
+  PREFER_DIRECT_BUFFER(true),    // READ-ONLY
+  /**
+   * Allow changing input buffer content (not positions).
+   * Maybe better perf if allowed
+   */
+  ALLOW_CHANGE_INPUTS(false),    // READ-WRITE
+  /* Allow dump verbose debug info or not */
+  ALLOW_VERBOSE_DUMP(false);     // READ-WRITE
+
+  private boolean isReadOnly = false;
+
+  CoderOption(boolean isReadOnly) {
+    this.isReadOnly = isReadOnly;
+  }
+
+  public boolean isReadOnly() {
+    return isReadOnly;
+  }
+};

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawDecoder.java

@@ -206,7 +206,7 @@ public class RSRawDecoder extends AbstractRawErasureDecoder {
         if (erasedIndexes[i] == erasedOrNotToReadIndexes[j]) {
           found = true;
           adjustedDirectBufferOutputsParameter[j] =
-              resetBuffer(outputs[outputIdx++]);
+              resetBuffer(outputs[outputIdx++], dataLen);
         }
       }
       if (!found) {
@@ -220,7 +220,7 @@ public class RSRawDecoder extends AbstractRawErasureDecoder {
         ByteBuffer buffer = checkGetDirectBuffer(bufferIdx, dataLen);
         buffer.position(0);
         buffer.limit(dataLen);
-        adjustedDirectBufferOutputsParameter[i] = resetBuffer(buffer);
+        adjustedDirectBufferOutputsParameter[i] = resetBuffer(buffer, dataLen);
         bufferIdx++;
       }
     }

+ 39 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawEncoder.java

@@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.rawcoder.util.RSUtil;
 
 import java.nio.ByteBuffer;
+import java.util.Arrays;
 
 /**
  * A raw erasure encoder in RS code scheme in pure Java in case native one
@@ -54,8 +55,26 @@ public class RSRawEncoder extends AbstractRawErasureEncoder {
   protected void doEncode(ByteBuffer[] inputs, ByteBuffer[] outputs) {
     // parity units + data units
     ByteBuffer[] all = new ByteBuffer[outputs.length + inputs.length];
-    System.arraycopy(outputs, 0, all, 0, outputs.length);
-    System.arraycopy(inputs, 0, all, outputs.length, inputs.length);
+
+    if (isAllowingChangeInputs()) {
+      System.arraycopy(outputs, 0, all, 0, outputs.length);
+      System.arraycopy(inputs, 0, all, outputs.length, inputs.length);
+    } else {
+      System.arraycopy(outputs, 0, all, 0, outputs.length);
+
+      /**
+       * Note when this coder would be really (rarely) used in a production
+       * system, this can  be optimized to cache and reuse the new allocated
+       * buffers avoiding reallocating.
+       */
+      ByteBuffer tmp;
+      for (int i = 0; i < inputs.length; i++) {
+        tmp = ByteBuffer.allocate(inputs[i].remaining());
+        tmp.put(inputs[i]);
+        tmp.flip();
+        all[outputs.length + i] = tmp;
+      }
+    }
 
     // Compute the remainder
     RSUtil.GF.remainder(all, generatingPolynomial);
@@ -67,15 +86,26 @@ public class RSRawEncoder extends AbstractRawErasureEncoder {
                           int[] outputOffsets) {
     // parity units + data units
     byte[][] all = new byte[outputs.length + inputs.length][];
-    System.arraycopy(outputs, 0, all, 0, outputs.length);
-    System.arraycopy(inputs, 0, all, outputs.length, inputs.length);
+    int[] allOffsets = new int[outputOffsets.length + inputOffsets.length];
 
-    int[] offsets = new int[inputOffsets.length + outputOffsets.length];
-    System.arraycopy(outputOffsets, 0, offsets, 0, outputOffsets.length);
-    System.arraycopy(inputOffsets, 0, offsets,
-        outputOffsets.length, inputOffsets.length);
+    if (isAllowingChangeInputs()) {
+      System.arraycopy(outputs, 0, all, 0, outputs.length);
+      System.arraycopy(inputs, 0, all, outputs.length, inputs.length);
+
+      System.arraycopy(outputOffsets, 0, allOffsets, 0, outputOffsets.length);
+      System.arraycopy(inputOffsets, 0, allOffsets,
+          outputOffsets.length, inputOffsets.length);
+    } else {
+      System.arraycopy(outputs, 0, all, 0, outputs.length);
+      System.arraycopy(outputOffsets, 0, allOffsets, 0, outputOffsets.length);
+
+      for (int i = 0; i < inputs.length; i++) {
+        all[outputs.length + i] = Arrays.copyOfRange(inputs[i],
+            inputOffsets[i], inputOffsets[i] + dataLen);
+      }
+    }
 
     // Compute the remainder
-    RSUtil.GF.remainder(all, offsets, dataLen, generatingPolynomial);
+    RSUtil.GF.remainder(all, allOffsets, dataLen, generatingPolynomial);
   }
 }

+ 14 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java

@@ -37,6 +37,20 @@ import org.apache.hadoop.conf.Configurable;
 @InterfaceAudience.Private
 public interface RawErasureCoder extends Configurable {
 
+  /**
+   * Get a coder option value.
+   * @param option
+   * @return
+   */
+  public Object getCoderOption(CoderOption option);
+
+  /**
+   * Set a coder option value.
+   * @param option
+   * @param value
+   */
+  public void setCoderOption(CoderOption option, Object value);
+
   /**
    * The number of data input units for the coding. A unit can be a byte,
    * chunk or buffer or even a block.
@@ -51,15 +65,6 @@ public interface RawErasureCoder extends Configurable {
    */
   public int getNumParityUnits();
 
-  /**
-   * Tell if direct buffer is preferred or not. It's for callers to
-   * decide how to allocate coding chunk buffers, using DirectByteBuffer or
-   * bytes array. It will return false by default.
-   * @return true if native buffer is preferred for performance consideration,
-   * otherwise false.
-   */
-  public boolean preferDirectBuffer();
-
   /**
    * Should be called when release this coder. Good chance to release encoding
    * or decoding buffers
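
A hedged sketch of how a caller might drive the option API introduced above; the (6, 3) RS schema and the RSRawEncoder constructor shown are illustrative, while the option handling mirrors the TestRawCoderBase change further down:

import org.apache.hadoop.io.erasurecode.rawcoder.CoderOption;
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoder;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

public class CoderOptionExample {
  public static void main(String[] args) {
    // Illustrative (6 data + 3 parity) coder; constructor arguments assumed.
    RawErasureEncoder encoder = new RSRawEncoder(6, 3);

    // PREFER_DIRECT_BUFFER is read-only: query it to decide whether to
    // allocate DirectByteBuffers or on-heap arrays for coding chunks.
    boolean preferDirect =
        (Boolean) encoder.getCoderOption(CoderOption.PREFER_DIRECT_BUFFER);

    // ALLOW_CHANGE_INPUTS is read-write; it defaults to false, in which case
    // RSRawEncoder copies its inputs instead of mutating them in place.
    encoder.setCoderOption(CoderOption.ALLOW_CHANGE_INPUTS, false);

    System.out.println("prefer direct buffer: " + preferDirect);
    encoder.release();
  }
}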

+ 17 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureDecoder.java

@@ -54,24 +54,27 @@ public interface RawErasureDecoder extends RawErasureCoder {
    * Note, for both inputs and outputs, no mixing of on-heap buffers and direct
    * buffers are allowed.
    *
-   * @param inputs inputs to read data from, contents may change after the call
+   * If the coder option ALLOW_CHANGE_INPUTS is set true (false by default), the
+   * content of input buffers may change after the call, subject to concrete
+   * implementation. Anyway the positions of input buffers will move forward.
+   *
+   * @param inputs input buffers to read data from
    * @param erasedIndexes indexes of erased units in the inputs array
-   * @param outputs outputs to write into for data generated according to
-   *                erasedIndexes, ready for reading the result data from after
-   *                the call
+   * @param outputs output buffers to put decoded data into according to
+   *                erasedIndexes, ready for read after the call
    */
-  public void decode(ByteBuffer[] inputs, int[] erasedIndexes,
+  void decode(ByteBuffer[] inputs, int[] erasedIndexes,
                      ByteBuffer[] outputs);
 
   /**
    * Decode with inputs and erasedIndexes, generates outputs. More see above.
-   * @param inputs inputs to read data from, contents may change after the call
+   *
+   * @param inputs input buffers to read data from
    * @param erasedIndexes indexes of erased units in the inputs array
-   * @param outputs outputs to write into for data generated according to
-   *                erasedIndexes, ready for reading the result data from after
-   *                the call
+   * @param outputs output buffers to put decoded data into according to
+   *                erasedIndexes, ready for read after the call
    */
-  public void decode(byte[][] inputs, int[] erasedIndexes, byte[][] outputs);
+  void decode(byte[][] inputs, int[] erasedIndexes, byte[][] outputs);
 
   /**
    * Decode with inputs and erasedIndexes, generates outputs. More see above.
@@ -79,12 +82,11 @@ public interface RawErasureDecoder extends RawErasureCoder {
    * Note, for both input and output ECChunks, no mixing of on-heap buffers and
    * direct buffers are allowed.
    *
-   * @param inputs inputs to read data from, contents may change after the call
+   * @param inputs input buffers to read data from
    * @param erasedIndexes indexes of erased units in the inputs array
-   * @param outputs outputs to write into for data generated according to
-   *                erasedIndexes, ready for reading the result data from after
-   *                the call
+   * @param outputs output buffers to put decoded data into according to
+   *                erasedIndexes, ready for read after the call
    */
-  public void decode(ECChunk[] inputs, int[] erasedIndexes, ECChunk[] outputs);
+  void decode(ECChunk[] inputs, int[] erasedIndexes, ECChunk[] outputs);
 
 }

+ 19 - 16
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureEncoder.java

@@ -38,29 +38,32 @@ public interface RawErasureEncoder extends RawErasureCoder {
    * Note, for both inputs and outputs, no mixing of on-heap buffers and direct
    * buffers are allowed.
    *
-   * @param inputs inputs to read data from, contents may change after the call
-   * @param outputs
+   * If the coder option ALLOW_CHANGE_INPUTS is set true (false by default), the
+   * content of input buffers may change after the call, subject to concrete
+   * implementation. Anyway the positions of input buffers will move forward.
+   *
+   * @param inputs input buffers to read data from
+   * @param outputs output buffers to put the encoded data into, read to read
+   *                after the call
    */
-  public void encode(ByteBuffer[] inputs, ByteBuffer[] outputs);
+  void encode(ByteBuffer[] inputs, ByteBuffer[] outputs);
 
   /**
-   * Encode with inputs and generates outputs
-   * @param inputs inputs to read data from, contents may change after the call
-   * @param outputs outputs to write into for data generated, ready for reading
-   *                the result data from after the call
+   * Encode with inputs and generates outputs. More see above.
+   *
+   * @param inputs input buffers to read data from
+   * @param outputs output buffers to put the encoded data into, read to read
+   *                after the call
    */
-  public void encode(byte[][] inputs, byte[][] outputs);
+  void encode(byte[][] inputs, byte[][] outputs);
 
   /**
-   * Encode with inputs and generates outputs.
-   *
-   * Note, for both input and output ECChunks, no mixing of on-heap buffers and
-   * direct buffers are allowed.
+   * Encode with inputs and generates outputs. More see above.
    *
-   * @param inputs inputs to read data from, contents may change after the call
-   * @param outputs outputs to write into for data generated, ready for reading
-   *                the result data from after the call
+   * @param inputs input buffers to read data from
+   * @param outputs output buffers to put the encoded data into, read to read
+   *                after the call
    */
-  public void encode(ECChunk[] inputs, ECChunk[] outputs);
+  void encode(ECChunk[] inputs, ECChunk[] outputs);
 
 }

+ 0 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawDecoder.java

@@ -39,7 +39,6 @@ public class XORRawDecoder extends AbstractRawErasureDecoder {
   protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
                           ByteBuffer[] outputs) {
     ByteBuffer output = outputs[0];
-    resetBuffer(output);
 
     int erasedIdx = erasedIndexes[0];
 

+ 0 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawEncoder.java

@@ -37,7 +37,6 @@ public class XORRawEncoder extends AbstractRawErasureEncoder {
 
   protected void doEncode(ByteBuffer[] inputs, ByteBuffer[] outputs) {
     ByteBuffer output = outputs[0];
-    resetBuffer(output);
 
     // Get the first buffer's data.
     int iIdx, oIdx;

+ 42 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -139,7 +139,17 @@ public class RetryPolicies {
       Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
     return new RemoteExceptionDependentRetry(defaultPolicy, exceptionToPolicyMap);
   }
-  
+
+  /**
+   * A retry policy for exceptions other than RemoteException.
+   */
+  public static final RetryPolicy retryOtherThanRemoteException(
+      RetryPolicy defaultPolicy,
+      Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
+    return new OtherThanRemoteExceptionDependentRetry(defaultPolicy,
+        exceptionToPolicyMap);
+  }
+
   public static final RetryPolicy failoverOnNetworkException(int maxFailovers) {
     return failoverOnNetworkException(TRY_ONCE_THEN_FAIL, maxFailovers);
   }
@@ -489,7 +499,37 @@ public class RetryPolicies {
       return policy.shouldRetry(e, retries, failovers, isIdempotentOrAtMostOnce);
     }
   }
-  
+
+  static class OtherThanRemoteExceptionDependentRetry implements RetryPolicy {
+
+    private RetryPolicy defaultPolicy;
+    private Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap;
+
+    public OtherThanRemoteExceptionDependentRetry(RetryPolicy defaultPolicy,
+        Map<Class<? extends Exception>,
+        RetryPolicy> exceptionToPolicyMap) {
+      this.defaultPolicy = defaultPolicy;
+      this.exceptionToPolicyMap = exceptionToPolicyMap;
+    }
+
+    @Override
+    public RetryAction shouldRetry(Exception e, int retries, int failovers,
+        boolean isIdempotentOrAtMostOnce) throws Exception {
+      RetryPolicy policy = null;
+      // ignore Remote Exception
+      if (e instanceof RemoteException) {
+        // do nothing
+      } else {
+        policy = exceptionToPolicyMap.get(e.getClass());
+      }
+      if (policy == null) {
+        policy = defaultPolicy;
+      }
+      return policy.shouldRetry(
+          e, retries, failovers, isIdempotentOrAtMostOnce);
+    }
+  }
+
   static class ExponentialBackoffRetry extends RetryLimited {
     
     public ExponentialBackoffRetry(
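
A hedged sketch of wiring the new retryOtherThanRemoteException policy into a retrying proxy, following the pattern used in the TestRetryProxy change below; MyProtocol and the retry counts are hypothetical:

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

public class RetryWiringExample {
  interface MyProtocol {
    void doWork() throws IOException;
  }

  static MyProtocol wrap(MyProtocol target) {
    // Retry plain local IOExceptions a few times; RemoteExceptions bypass
    // the map and fall through to the default policy (fail immediately).
    Map<Class<? extends Exception>, RetryPolicy> policyMap =
        Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(
            IOException.class,
            RetryPolicies.retryUpToMaximumCountWithFixedSleep(
                3, 100, TimeUnit.MILLISECONDS));

    RetryPolicy policy = RetryPolicies.retryOtherThanRemoteException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, policyMap);

    return (MyProtocol) RetryProxy.create(MyProtocol.class, target, policy);
  }
}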

+ 8 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java

@@ -44,6 +44,7 @@ public class CallerContext {
    * {@link org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_CALLER_CONTEXT_MAX_SIZE_DEFAULT}
    */
   private final String context;
+
   /** The caller's signature for validation.
    *
    * The signature is optional. The null or empty signature will be abandoned.
@@ -58,10 +59,6 @@ public class CallerContext {
     this.signature = builder.signature;
   }
 
-  public boolean isValid() {
-    return context != null;
-  }
-
   public String getContext() {
     return context;
   }
@@ -71,6 +68,11 @@ public class CallerContext {
         null : Arrays.copyOf(signature, signature.length);
   }
 
+  @InterfaceAudience.Private
+  public boolean isContextValid() {
+    return context != null && !context.isEmpty();
+  }
+
   @Override
   public int hashCode() {
     return new HashCodeBuilder().append(context).toHashCode();
@@ -92,9 +94,10 @@ public class CallerContext {
           .isEquals();
     }
   }
+
   @Override
   public String toString() {
-    if (!isValid()) {
+    if (!isContextValid()) {
       return "";
     }
     String str = context;

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java

@@ -180,7 +180,7 @@ public abstract class ProtoUtil {
 
     // Add caller context if it is not null
     CallerContext callerContext = CallerContext.getCurrent();
-    if (callerContext != null && callerContext.isValid()) {
+    if (callerContext != null && callerContext.isContextValid()) {
       RPCCallerContextProto.Builder contextBuilder = RPCCallerContextProto
           .newBuilder().setContext(callerContext.getContext());
       if (callerContext.getSignature() != null) {

+ 1 - 0
hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md

@@ -240,6 +240,7 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
 | `LockQueueLength` | Number of threads waiting to acquire FSNameSystem lock |
 | `TotalSyncCount` | Total number of sync operations performed by edit log |
 | `TotalSyncTimes` | Total number of milliseconds spent by various edit logs in sync operation|
+| `NameDirSize` | NameNode name directories size in bytes |
 
 JournalNode
 -----------

+ 30 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java

@@ -64,6 +64,8 @@ public abstract class TestCoderBase {
   private static int FIXED_DATA_GENERATOR = 0;
   protected byte[][] fixedData;
 
+  protected boolean allowChangeInputs;
+
   protected int getChunkSize() {
     return chunkSize;
   }
@@ -253,6 +255,22 @@ public abstract class TestCoderBase {
     }
   }
 
+  protected void markChunks(ECChunk[] chunks) {
+    for (int i = 0; i < chunks.length; i++) {
+      if (chunks[i] != null) {
+        chunks[i].getBuffer().mark();
+      }
+    }
+  }
+
+  protected void restoreChunksFromMark(ECChunk[] chunks) {
+    for (int i = 0; i < chunks.length; i++) {
+      if (chunks[i] != null) {
+        chunks[i].getBuffer().reset();
+      }
+    }
+  }
+
   /**
    * Clone chunks along with copying the associated data. It respects how the
    * chunk buffer is allocated, direct or non-direct. It avoids affecting the
@@ -277,6 +295,10 @@ public abstract class TestCoderBase {
    * @return a new chunk
    */
   protected ECChunk cloneChunkWithData(ECChunk chunk) {
+    if (chunk == null) {
+      return null;
+    }
+
     ByteBuffer srcBuffer = chunk.getBuffer();
 
     byte[] bytesArr = new byte[srcBuffer.remaining()];
@@ -453,14 +475,16 @@ public abstract class TestCoderBase {
     byte[][] bytesArr = new byte[chunks.length][];
 
     for (int i = 0; i < chunks.length; i++) {
-      bytesArr[i] = chunks[i].toBytesArray();
+      if (chunks[i] != null) {
+        bytesArr[i] = chunks[i].toBytesArray();
+      }
     }
 
     return bytesArr;
   }
 
   /**
-   * Dump all the settings used in the test case if allowDump is enabled.
+   * Dump all the settings used in the test case if isAllowingVerboseDump is enabled.
    */
   protected void dumpSetting() {
     if (allowDump) {
@@ -473,14 +497,16 @@ public abstract class TestCoderBase {
               append(Arrays.toString(erasedDataIndexes));
       sb.append(" erasedParityIndexes=").
               append(Arrays.toString(erasedParityIndexes));
-      sb.append(" usingDirectBuffer=").append(usingDirectBuffer).append("\n");
+      sb.append(" usingDirectBuffer=").append(usingDirectBuffer);
+      sb.append(" isAllowingChangeInputs=").append(allowChangeInputs);
+      sb.append("\n");
 
       System.out.println(sb.toString());
     }
   }
 
   /**
-   * Dump chunks prefixed with a header if allowDump is enabled.
+   * Dump chunks prefixed with a header if isAllowingVerboseDump is enabled.
    * @param header
    * @param chunks
    */

+ 31 - 6
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestRawCoderBase.java

@@ -68,9 +68,9 @@ public abstract class TestRawCoderBase extends TestCoderBase {
      * The following runs will use 3 different chunkSize for inputs and outputs,
      * to verify the same encoder/decoder can process variable width of data.
      */
-    performTestCoding(baseChunkSize, true, false, false);
-    performTestCoding(baseChunkSize - 17, false, false, false);
-    performTestCoding(baseChunkSize + 16, true, false, false);
+    performTestCoding(baseChunkSize, true, false, false, false);
+    performTestCoding(baseChunkSize - 17, false, false, false, true);
+    performTestCoding(baseChunkSize + 16, true, false, false, false);
   }
 
   /**
@@ -82,7 +82,7 @@ public abstract class TestRawCoderBase extends TestCoderBase {
     prepareCoders();
 
     try {
-      performTestCoding(baseChunkSize, false, true, false);
+      performTestCoding(baseChunkSize, false, true, false, true);
       Assert.fail("Encoding test with bad input should fail");
     } catch (Exception e) {
       // Expected
@@ -98,7 +98,7 @@ public abstract class TestRawCoderBase extends TestCoderBase {
     prepareCoders();
 
     try {
-      performTestCoding(baseChunkSize, false, false, true);
+      performTestCoding(baseChunkSize, false, false, true, true);
       Assert.fail("Decoding test with bad output should fail");
     } catch (Exception e) {
       // Expected
@@ -123,9 +123,11 @@ public abstract class TestRawCoderBase extends TestCoderBase {
   }
 
   private void performTestCoding(int chunkSize, boolean usingSlicedBuffer,
-                                 boolean useBadInput, boolean useBadOutput) {
+                                 boolean useBadInput, boolean useBadOutput,
+                                 boolean allowChangeInputs) {
     setChunkSize(chunkSize);
     prepareBufferAllocator(usingSlicedBuffer);
+    setAllowChangeInputs(allowChangeInputs);
 
     dumpSetting();
 
@@ -141,10 +143,16 @@ public abstract class TestRawCoderBase extends TestCoderBase {
     // Backup all the source chunks for later recovering because some coders
     // may affect the source data.
     ECChunk[] clonedDataChunks = cloneChunksWithData(dataChunks);
+    markChunks(dataChunks);
 
     encoder.encode(dataChunks, parityChunks);
     dumpChunks("Encoded parity chunks", parityChunks);
 
+    if (!allowChangeInputs) {
+      restoreChunksFromMark(dataChunks);
+      compareAndVerify(clonedDataChunks, dataChunks);
+    }
+
     // Backup and erase some chunks
     ECChunk[] backupChunks = backupAndEraseChunks(clonedDataChunks, parityChunks);
 
@@ -160,14 +168,31 @@ public abstract class TestRawCoderBase extends TestCoderBase {
       corruptSomeChunk(recoveredChunks);
     }
 
+    ECChunk[] clonedInputChunks = null;
+    if (!allowChangeInputs) {
+      markChunks(inputChunks);
+      clonedInputChunks = cloneChunksWithData(inputChunks);
+    }
+
     dumpChunks("Decoding input chunks", inputChunks);
     decoder.decode(inputChunks, getErasedIndexesForDecoding(), recoveredChunks);
     dumpChunks("Decoded/recovered chunks", recoveredChunks);
 
+    if (!allowChangeInputs) {
+      restoreChunksFromMark(inputChunks);
+      compareAndVerify(clonedInputChunks, inputChunks);
+    }
+
     // Compare
     compareAndVerify(backupChunks, recoveredChunks);
   }
 
+  private void setAllowChangeInputs(boolean allowChangeInputs) {
+    this.allowChangeInputs = allowChangeInputs;
+    encoder.setCoderOption(CoderOption.ALLOW_CHANGE_INPUTS, allowChangeInputs);
+    decoder.setCoderOption(CoderOption.ALLOW_CHANGE_INPUTS, allowChangeInputs);
+  }
+
   private void prepareCoders() {
     if (encoder == null) {
       encoder = createEncoder();

+ 1 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoder.java

@@ -29,6 +29,7 @@ public class TestXORRawCoder extends TestRawCoderBase {
   public void setup() {
     this.encoderClass = XORRawEncoder.class;
     this.decoderClass = XORRawDecoder.class;
+    setAllowDump(false);
   }
 
   @Test

+ 25 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java

@@ -22,6 +22,7 @@ import static org.apache.hadoop.io.retry.RetryPolicies.RETRY_FOREVER;
 import static org.apache.hadoop.io.retry.RetryPolicies.TRY_ONCE_THEN_FAIL;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryByException;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryByRemoteException;
+import static org.apache.hadoop.io.retry.RetryPolicies.retryOtherThanRemoteException;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithProportionalSleep;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumTimeWithFixedSleep;
@@ -29,6 +30,7 @@ import static org.apache.hadoop.io.retry.RetryPolicies.retryForeverWithFixedSlee
 import static org.apache.hadoop.io.retry.RetryPolicies.exponentialBackoffRetry;
 import static org.junit.Assert.*;
 
+import java.io.IOException;
 import java.util.Collections;
 import java.util.Map;
 import java.util.concurrent.Callable;
@@ -213,8 +215,29 @@ public class TestRetryProxy {
     } catch (RemoteException e) {
       // expected
     }
-  }  
-  
+  }
+
+  @Test
+  public void testRetryOtherThanRemoteException() throws Throwable {
+    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
+        Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(
+            IOException.class, RETRY_FOREVER);
+
+    UnreliableInterface unreliable = (UnreliableInterface)
+        RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+            retryOtherThanRemoteException(TRY_ONCE_THEN_FAIL,
+                exceptionToPolicyMap));
+    // should retry with local IOException.
+    unreliable.failsOnceWithIOException();
+    try {
+      // won't get retry on remote exception
+      unreliable.failsOnceWithRemoteException();
+      fail("Should fail");
+    } catch (RemoteException e) {
+      // expected
+    }
+  }
+
   @Test
   public void testRetryInterruptible() throws Throwable {
     final UnreliableInterface unreliable = (UnreliableInterface)

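For reference, a minimal sketch of wiring the new retryOtherThanRemoteException policy onto an ordinary proxy. MyProtocol and its implementation are hypothetical placeholders; the RetryPolicies and RetryProxy calls follow the usage shown in the test above.

    import java.io.IOException;
    import java.util.Collections;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;

    public class RetryOtherThanRemoteSketch {
      /** Hypothetical protocol used only to illustrate the wiring. */
      interface MyProtocol {
        void doWork() throws IOException;
      }

      static MyProtocol wrap(MyProtocol target) {
        // Retry local IOExceptions a bounded number of times, but let
        // RemoteExceptions (server-side failures) surface immediately.
        Map<Class<? extends Exception>, RetryPolicy> policyMap =
            Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(
                IOException.class,
                RetryPolicies.retryUpToMaximumCountWithFixedSleep(
                    3, 100, TimeUnit.MILLISECONDS));
        RetryPolicy policy = RetryPolicies.retryOtherThanRemoteException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, policyMap);
        return (MyProtocol) RetryProxy.create(MyProtocol.class, target, policy);
      }
    }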
+ 17 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java

@@ -26,6 +26,8 @@ class UnreliableImplementation implements UnreliableInterface {
 
   private int failsOnceInvocationCount,
     failsOnceWithValueInvocationCount,
+    failsOnceIOExceptionInvocationCount,
+    failsOnceRemoteExceptionInvocationCount,
     failsTenTimesInvocationCount,
     succeedsOnceThenFailsCount,
     succeedsOnceThenFailsIdempotentCount,
@@ -89,6 +91,21 @@ class UnreliableImplementation implements UnreliableInterface {
     return true;
   }
 
+  @Override
+  public void failsOnceWithIOException() throws IOException {
+    if (failsOnceIOExceptionInvocationCount++ == 0) {
+      throw new IOException("test exception for failsOnceWithIOException");
+    }
+  }
+
+  @Override
+  public void failsOnceWithRemoteException() throws RemoteException {
+    if (failsOnceRemoteExceptionInvocationCount++ == 0) {
+      throw new RemoteException(IOException.class.getName(),
+          "test exception for failsOnceWithRemoteException");
+    }
+  }
+
   @Override
   public void failsTenTimesThenSucceeds() throws UnreliableException {
     if (failsTenTimesInvocationCount++ < 10) {

+ 3 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java

@@ -54,6 +54,9 @@ public interface UnreliableInterface {
   void alwaysFailsWithFatalException() throws FatalException;
   void alwaysFailsWithRemoteFatalException() throws RemoteException;
 
+  void failsOnceWithIOException() throws IOException;
+  void failsOnceWithRemoteException() throws RemoteException;
+
   void failsOnceThenSucceeds() throws UnreliableException;
   boolean failsOnceThenSucceedsWithReturnValue() throws UnreliableException;
 

+ 9 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java

@@ -30,6 +30,7 @@ import javax.naming.NameNotFoundException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Time;
 
 import org.junit.Test;
@@ -37,6 +38,7 @@ import org.junit.Test;
 import static org.hamcrest.CoreMatchers.not;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;
+import static org.junit.Assume.assumeTrue;
 
 /**
  * Test host name and IP resolution and caching.
@@ -185,13 +187,17 @@ public class TestDNS {
    *
    * This test may fail on some misconfigured test machines that don't have
    * an entry for "localhost" in their hosts file. This entry is correctly
-   * configured out of the box on common Linux distributions, OS X and
-   * Windows.
+   * configured out of the box on common Linux distributions and OS X.
+   *
+   * Windows refuses to resolve 127.0.0.1 to "localhost" despite the presence of
+   * this entry in the hosts file.  We skip the test on Windows to avoid
+   * reporting a spurious failure.
    *
    * @throws Exception
    */
   @Test (timeout=60000)
   public void testLookupWithHostsFallback() throws Exception {
+    assumeTrue(!Shell.WINDOWS);
     final String oldHostname = changeDnsCachedHostname(DUMMY_HOSTNAME);
 
     try {
@@ -231,7 +237,7 @@ public class TestDNS {
 
   private String getLoopbackInterface() throws SocketException {
     return NetworkInterface.getByInetAddress(
-        InetAddress.getLoopbackAddress()).getDisplayName();
+        InetAddress.getLoopbackAddress()).getName();
   }
 
   /**

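The same skip pattern can be reused by other platform-sensitive tests; a small illustrative sketch (the test name and body are hypothetical):

    import static org.junit.Assume.assumeTrue;
    import org.apache.hadoop.util.Shell;
    import org.junit.Test;

    public class PlatformSensitiveTestSketch {
      @Test
      public void testLoopbackNameResolution() throws Exception {
        // Skip (rather than fail) on Windows, which does not resolve
        // 127.0.0.1 back to "localhost" even with a hosts-file entry.
        assumeTrue(!Shell.WINDOWS);
        // ... platform-dependent assertions would go here ...
      }
    }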
+ 21 - 0
hadoop-hdfs-project/hadoop-hdfs-client/pom.xml

@@ -51,6 +51,27 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
         </exclusion>
       </exclusions>
     </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mock-server</groupId>
+      <artifactId>mockserver-netty</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
   </dependencies>
 
   <build>

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java

@@ -892,7 +892,6 @@ public class DFSStripedInputStream extends DFSInputStream {
 
     @Override
     void decode() {
-      // TODO no copy for data chunks. this depends on HADOOP-12047
       final int span = (int) alignedStripe.getSpanInBlock();
       for (int i = 0; i < alignedStripe.chunks.length; i++) {
         if (alignedStripe.chunks[i] != null &&

+ 18 - 3
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java

@@ -221,9 +221,6 @@ public class DFSStripedOutputStream extends DFSOutputStream {
     private void clear() {
       for (int i = 0; i< numAllBlocks; i++) {
         buffers[i].clear();
-        if (i >= numDataBlocks) {
-          Arrays.fill(buffers[i].array(), (byte) 0);
-        }
       }
     }
 
@@ -844,6 +841,11 @@ public class DFSStripedOutputStream extends DFSOutputStream {
 
   void writeParityCells() throws IOException {
     final ByteBuffer[] buffers = cellBuffers.getBuffers();
+    // Skips encoding and writing parity cells if there are no healthy parity
+    // data streamers
+    if (!checkAnyParityStreamerIsHealthy()) {
+      return;
+    }
     //encode the data cells
     encode(encoder, numDataBlocks, buffers);
     for (int i = numDataBlocks; i < numAllBlocks; i++) {
@@ -852,6 +854,19 @@ public class DFSStripedOutputStream extends DFSOutputStream {
     cellBuffers.clear();
   }
 
+  private boolean checkAnyParityStreamerIsHealthy() {
+    for (int i = numDataBlocks; i < numAllBlocks; i++) {
+      if (streamers.get(i).isHealthy()) {
+        return true;
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Skips encoding and writing parity cells as there are "
+          + "no healthy parity data streamers: " + streamers);
+    }
+    return false;
+  }
+
   void writeParity(int index, ByteBuffer buffer, byte[] checksumBuf)
       throws IOException {
     final StripedDataStreamer current = setCurrentStreamer(index);

+ 38 - 40
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java

@@ -30,16 +30,49 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class ReplaceDatanodeOnFailure {
+  /**
+   * DEFAULT condition:
+   *   Let r be the replication number.
+   *   Let n be the number of existing datanodes.
+   *   Add a new datanode only if r >= 3 and either
+   *   (1) floor(r/2) >= n or (2) the block is hflushed/appended.
+   */
+  private static final Condition CONDITION_DEFAULT = new Condition() {
+    @Override
+    public boolean satisfy(final short replication,
+        final DatanodeInfo[] existings, final int n, final boolean isAppend,
+        final boolean isHflushed) {
+      return replication >= 3 &&
+          (n <= (replication / 2) || isAppend || isHflushed);
+    }
+  };
+  /** Return false unconditionally. */
+  private static final Condition CONDITION_FALSE = new Condition() {
+    @Override
+    public boolean satisfy(short replication, DatanodeInfo[] existings,
+        int nExistings, boolean isAppend, boolean isHflushed) {
+      return false;
+    }
+  };
+  /** Return true unconditionally. */
+  private static final Condition CONDITION_TRUE = new Condition() {
+    @Override
+    public boolean satisfy(short replication, DatanodeInfo[] existings,
+        int nExistings, boolean isAppend, boolean isHflushed) {
+      return true;
+    }
+  };
+
   /** The replacement policies */
   public enum Policy {
     /** The feature is disabled in the entire site. */
-    DISABLE(Condition.FALSE),
+    DISABLE(CONDITION_FALSE),
     /** Never add a new datanode. */
-    NEVER(Condition.FALSE),
-    /** @see ReplaceDatanodeOnFailure.Condition#DEFAULT */
-    DEFAULT(Condition.DEFAULT),
+    NEVER(CONDITION_FALSE),
+    /** @see ReplaceDatanodeOnFailure#CONDITION_DEFAULT */
+    DEFAULT(CONDITION_DEFAULT),
     /** Always add a new datanode when an existing datanode is removed. */
-    ALWAYS(Condition.TRUE);
+    ALWAYS(CONDITION_TRUE);
 
     private final Condition condition;
 
@@ -54,41 +87,6 @@ public class ReplaceDatanodeOnFailure {
 
   /** Datanode replacement condition */
   private interface Condition {
-    /** Return true unconditionally. */
-    Condition TRUE = new Condition() {
-      @Override
-      public boolean satisfy(short replication, DatanodeInfo[] existings,
-          int nExistings, boolean isAppend, boolean isHflushed) {
-        return true;
-      }
-    };
-
-    /** Return false unconditionally. */
-    Condition FALSE = new Condition() {
-      @Override
-      public boolean satisfy(short replication, DatanodeInfo[] existings,
-          int nExistings, boolean isAppend, boolean isHflushed) {
-        return false;
-      }
-    };
-
-    /**
-     * DEFAULT condition:
-     *   Let r be the replication number.
-     *   Let n be the number of existing datanodes.
-     *   Add a new datanode only if r >= 3 and either
-     *   (1) floor(r/2) >= n; or
-     *   (2) r > n and the block is hflushed/appended.
-     */
-    Condition DEFAULT = new Condition() {
-      @Override
-      public boolean satisfy(final short replication,
-          final DatanodeInfo[] existings, final int n, final boolean isAppend,
-          final boolean isHflushed) {
-        return replication >= 3 &&
-            (n <= (replication / 2) || isAppend || isHflushed);
-      }
-    };
 
     /** Is the condition satisfied? */
     boolean satisfy(short replication, DatanodeInfo[] existings, int nExistings,

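To make the relocated DEFAULT condition concrete, a small stand-alone sketch that mirrors the predicate above and evaluates it for a few sample values of r (replication) and n (existing datanodes):

    public class ReplaceDatanodePredicateSketch {
      /** Mirrors CONDITION_DEFAULT: r >= 3 and (n <= floor(r/2) or hflushed/appended). */
      static boolean addDatanode(int r, int n, boolean isAppend, boolean isHflushed) {
        return r >= 3 && (n <= (r / 2) || isAppend || isHflushed);
      }

      public static void main(String[] args) {
        System.out.println(addDatanode(3, 1, false, false)); // true: 1 <= floor(3/2)
        System.out.println(addDatanode(3, 2, false, false)); // false: enough datanodes remain
        System.out.println(addDatanode(3, 2, false, true));  // true: block was hflushed
        System.out.println(addDatanode(2, 1, true, true));   // false: r < 3 never replaces
      }
    }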
+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestXAttr.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java


+ 5 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java

@@ -17,18 +17,16 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-
-import java.net.InetSocketAddress;
-import java.net.URI;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-
 import org.junit.Test;
 
+import java.net.InetSocketAddress;
+import java.net.URI;
+
+import static org.junit.Assert.assertEquals;
+
 /** Test NameNode port defaulting code. */
 public class TestDefaultNameNodePort {
 

+ 12 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java

@@ -17,17 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.ReadableByteChannel;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import com.google.common.collect.HashMultiset;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.net.unix.DomainSocket;
@@ -35,11 +25,20 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.HashMultiset;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.channels.ReadableByteChannel;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
 
 public class TestPeerCache {
-  static final Log LOG = LogFactory.getLog(TestPeerCache.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestPeerCache.class);
 
   private static class FakePeer implements Peer {
     private boolean closed = false;

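Several of the relocated tests above also migrate from commons-logging to slf4j; for reference, a minimal sketch of the before/after pattern, including slf4j's parameterized {} placeholders (the class name is illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jMigrationSketch {
      // Before: static final Log LOG = LogFactory.getLog(Slf4jMigrationSketch.class);
      static final Logger LOG = LoggerFactory.getLogger(Slf4jMigrationSketch.class);

      static void report(String peer, int cacheSize) {
        // Parameterized logging; the message is only built if the level is enabled.
        LOG.debug("Cached peer {} (cache size now {})", peer, cacheSize);
      }
    }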
+ 5 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java

@@ -17,14 +17,9 @@
  */
 package org.apache.hadoop.hdfs.client.impl;
 
-import static org.junit.Assert.assertSame;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
-
+import com.google.common.base.Supplier;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSOutputStream;
-import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -35,7 +30,10 @@ import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import com.google.common.base.Supplier;
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.junit.Assert.assertSame;
 
 public class TestLeaseRenewer {
   private final String FAKE_AUTHORITY="hdfs://nn1/";

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java


+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java

@@ -17,16 +17,8 @@
  */
 package org.apache.hadoop.hdfs.shortcircuit;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.util.ArrayList;
-import java.util.Iterator;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
-import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory;
@@ -34,9 +26,17 @@ import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.util.ArrayList;
+import java.util.Iterator;
 
 public class TestShortCircuitShm {
-  public static final Log LOG = LogFactory.getLog(TestShortCircuitShm.class);
+  public static final Logger LOG = LoggerFactory.getLogger(
+      TestShortCircuitShm.class);
   
   private static final File TEST_BASE =
       new File(System.getProperty("test.build.data", "/tmp"));

+ 18 - 19
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java

@@ -17,6 +17,19 @@
  */
 package org.apache.hadoop.hdfs.util;
 
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.util.ByteArrayManager.Counter;
+import org.apache.hadoop.hdfs.util.ByteArrayManager.CounterMap;
+import org.apache.hadoop.hdfs.util.ByteArrayManager.FixedLengthManager;
+import org.apache.hadoop.hdfs.util.ByteArrayManager.ManagerMap;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.log4j.Level;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
@@ -31,29 +44,16 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.util.ByteArrayManager.Counter;
-import org.apache.hadoop.hdfs.util.ByteArrayManager.CounterMap;
-import org.apache.hadoop.hdfs.util.ByteArrayManager.FixedLengthManager;
-import org.apache.hadoop.hdfs.util.ByteArrayManager.ManagerMap;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
-import org.junit.Assert;
-import org.junit.Test;
-
 /**
  * Test {@link ByteArrayManager}.
  */
 public class TestByteArrayManager {
   static {
-    GenericTestUtils.setLogLevel(LogFactory.getLog(ByteArrayManager.class),
-        Level.ALL);
+    GenericTestUtils.setLogLevel(
+        LoggerFactory.getLogger(ByteArrayManager.class), Level.ALL);
   }
 
-  static final Log LOG = LogFactory.getLog(TestByteArrayManager.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestByteArrayManager.class);
 
   private static final Comparator<Future<Integer>> CMP = new Comparator<Future<Integer>>() {
     @Override
@@ -559,9 +559,8 @@ public class TestByteArrayManager {
   }
   
   public static void main(String[] args) throws Exception {
-    GenericTestUtils.setLogLevel(LogFactory.getLog(ByteArrayManager.class),
-        Level.OFF);
-
+    GenericTestUtils.setLogLevel(LoggerFactory.getLogger(ByteArrayManager.class),
+                                 Level.OFF);
     final int arrayLength = 64 * 1024; //64k
     final int nThreads = 512;
     final int nAllocations = 1 << 15;

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java


+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java

@@ -18,8 +18,6 @@
  */
 package org.apache.hadoop.hdfs.web;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -38,6 +36,8 @@ import org.mockserver.integration.ClientAndServer;
 import org.mockserver.model.Header;
 import org.mockserver.model.HttpRequest;
 import org.mockserver.model.HttpResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.URI;
@@ -58,7 +58,8 @@ import static org.mockserver.model.HttpRequest.request;
 import static org.mockserver.model.HttpResponse.response;
 
 public class TestWebHDFSOAuth2 {
-  public static final Log LOG = LogFactory.getLog(TestWebHDFSOAuth2.class);
+  public static final Logger LOG = LoggerFactory.getLogger(
+      TestWebHDFSOAuth2.class);
 
   private ClientAndServer mockWebHDFS;
   private ClientAndServer mockOAuthServer;

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java → hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java


+ 65 - 4
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -184,6 +184,14 @@ Trunk (Unreleased)
     HDFS-9070. Allow fsck display pending replica location information for
     being-written blocks. (GAO Rui via jing9)
 
+    HDFS-9261. Erasure Coding: Skip encoding the data cells if all the parity data 
+    streamers are failed for the current block group. (Rakesh R via umamahesh)
+
+    HDFS-9323. Randomize the DFSStripedOutputStreamWithFailure tests. (szetszwo)
+
+    HDFS-8777. Erasure Coding: add tests for taking snapshots on EC files. 
+    (Rakesh R via zhz)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -841,6 +849,9 @@ Trunk (Unreleased)
       HDFS-8438. Erasure Coding: Allow concat striped files if they have the same
       ErasureCodingPolicy. (Walter Su via jing9)
 
+      HDFS-9275. Wait previous ErasureCodingWork to finish before schedule
+      another one. (Walter Su via yliu)
+
 Release 2.8.0 - UNRELEASED
 
   NEW FEATURES
@@ -1524,9 +1535,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-9110. Use Files.walkFileTree in NNUpgradeUtil#doPreUpgrade for
     better efficiency. (Charlie Helin via wang)
 
-    HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
-    values() since it creates a temporary array. (Staffan Friberg via yliu)
-
     HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in
     BlockManager#excessReplicateMap. (yliu)
 
@@ -1599,6 +1607,17 @@ Release 2.8.0 - UNRELEASED
     HDFS-9255. Consolidate block recovery related implementation into a single
     class. (Walter Su via zhz)
 
+    HDFS-9295. Add a thorough test of the full KMS code path. 
+    (Daniel Templeton via zhz)
+
+    HDFS-8545. Refactor FS#getUsed() to use ContentSummary and add an API to fetch
+    the total file length from a specific path (J.Andreina via vinayakumarb)
+
+    HDFS-9229. Expose size of NameNode directory as a metric.
+    (Surendra Singh Lilhore via zhz)
+
+    HDFS-9339. Extend full test of KMS ACLs. (Daniel Templeton via zhz)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -1638,6 +1657,12 @@ Release 2.8.0 - UNRELEASED
     HDFS-9297. Update TestBlockMissingException to use corruptBlockOnDataNodesByDeletingBlockFile().
     (Tony Wu via lei)
 
+    HDFS-9168. Move client side unit test to hadoop-hdfs-client. (wheat9)
+
+    HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)
+
+    HDFS-9308. Add truncateMeta() and deleteMeta() to MiniDFSCluster. (Tony Wu via lei)
+
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
@@ -2179,6 +2204,27 @@ Release 2.8.0 - UNRELEASED
     HDFS-9297. Decomissioned capacity should not be considered for 
     configured/used capacity (Contributed by Kuhu Shukla)
 
+    HDFS-9044. Give Priority to FavouredNodes , before selecting
+    nodes from FavouredNode's Node Group (J.Andreina via vinayakumarb)
+
+    HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
+    saving namespace. (wang)
+
+    HDFS-9343. Empty caller context considered invalid. (Mingliang Liu via
+    Arpit Agarwal)
+
+    HDFS-9329. TestBootstrapStandby#testRateThrottling is flaky because fsimage
+    size is smaller than IO buffer size. (zhz)
+
+    HDFS-9313. Possible NullPointerException in BlockManager if no excess
+    replica can be chosen. (mingma)
+
+    HDFS-9354. Fix TestBalancer#testBalancerWithZeroThreadsForMove on Windows.
+    (Xiaoyu Yao via cnauroth)
+
+    HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line
+    endings, fails on Windows. (cnauroth)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -2195,6 +2241,9 @@ Release 2.7.2 - UNRELEASED
     HDFS-8099. Change "DFSInputStream has been closed already" message to
     debug log level (Charles Lamb via Colin P. McCabe)
 
+    HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
+    values() since it creates a temporary array. (Staffan Friberg via yliu)
+
   OPTIMIZATIONS
 
     HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
@@ -3273,7 +3322,19 @@ Release 2.7.0 - 2015-04-20
       HDFS-7700. Document quota support for storage types. (Xiaoyu Yao via
       Arpit Agarwal)
 
-Release 2.6.2 - UNRELEASED
+Release 2.6.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES
 

+ 0 - 6
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -211,12 +211,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>leveldbjni-all</artifactId>
       <version>1.8</version>
     </dependency>
-    <dependency>
-      <groupId>org.mock-server</groupId>
-      <artifactId>mockserver-netty</artifactId>
-      <version>3.9.2</version>
-      <scope>test</scope>
-    </dependency>
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.bouncycastle</groupId>

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1624,6 +1624,10 @@ public class BlockManager implements RwLock, BlockStatsMXBean {
     }
 
     if (block.isStriped()) {
+      if (pendingNum > 0) {
+        // Wait the previous recovery to finish.
+        return null;
+      }
       short[] indices = new short[liveBlockIndices.size()];
       for (int i = 0 ; i < liveBlockIndices.size(); i++) {
         indices[i] = liveBlockIndices.get(i);
@@ -1679,6 +1683,7 @@ public class BlockManager implements RwLock, BlockStatsMXBean {
     if (block.isStriped()) {
       assert rw instanceof ErasureCodingWork;
       assert rw.getTargets().length > 0;
+      assert pendingNum == 0: "Should wait the previous recovery to finish";
       String src = getBlockCollection(block).getName();
       ErasureCodingPolicy ecPolicy = null;
       try {

+ 5 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java

@@ -23,8 +23,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
@@ -33,13 +31,17 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /** 
  * This interface is used for choosing the desired number of targets
  * for placing block replicas.
  */
 @InterfaceAudience.Private
 public abstract class BlockPlacementPolicy {
-  static final Log LOG = LogFactory.getLog(BlockPlacementPolicy.class);
+  static final Logger LOG = LoggerFactory.getLogger(
+      BlockPlacementPolicy.class);
 
   @InterfaceAudience.Private
   public static class NotEnoughReplicasException extends Exception {

+ 32 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -138,20 +138,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       numOfReplicas = maxNodesAndReplicas[0];
       int maxNodesPerRack = maxNodesAndReplicas[1];
 
-      for (int i = 0; i < favoredNodes.size() && results.size() < numOfReplicas; i++) {
-        DatanodeDescriptor favoredNode = favoredNodes.get(i);
-        // Choose a single node which is local to favoredNode.
-        // 'results' is updated within chooseLocalNode
-        final DatanodeStorageInfo target = chooseLocalStorage(favoredNode,
-            favoriteAndExcludedNodes, blocksize, maxNodesPerRack,
-            results, avoidStaleNodes, storageTypes, false);
-        if (target == null) {
-          LOG.warn("Could not find a target for file " + src
-              + " with favored node " + favoredNode); 
-          continue;
-        }
-        favoriteAndExcludedNodes.add(target.getDatanodeDescriptor());
-      }
+      chooseFavouredNodes(src, numOfReplicas, favoredNodes,
+          favoriteAndExcludedNodes, blocksize, maxNodesPerRack, results,
+          avoidStaleNodes, storageTypes);
 
       if (results.size() < numOfReplicas) {
         // Not enough favored nodes, choose other nodes.
@@ -177,6 +166,29 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     }
   }
 
+  protected void chooseFavouredNodes(String src, int numOfReplicas,
+      List<DatanodeDescriptor> favoredNodes,
+      Set<Node> favoriteAndExcludedNodes, long blocksize, int maxNodesPerRack,
+      List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
+      EnumMap<StorageType, Integer> storageTypes)
+      throws NotEnoughReplicasException {
+    for (int i = 0; i < favoredNodes.size() && results.size() < numOfReplicas;
+        i++) {
+      DatanodeDescriptor favoredNode = favoredNodes.get(i);
+      // Choose a single node which is local to favoredNode.
+      // 'results' is updated within chooseLocalNode
+      final DatanodeStorageInfo target =
+          chooseLocalStorage(favoredNode, favoriteAndExcludedNodes, blocksize,
+            maxNodesPerRack, results, avoidStaleNodes, storageTypes, false);
+      if (target == null) {
+        LOG.warn("Could not find a target for file " + src
+            + " with favored node " + favoredNode);
+        continue;
+      }
+      favoriteAndExcludedNodes.add(target.getDatanodeDescriptor());
+    }
+  }
+
   /** This is the implementation. */
   private DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
                                     Node writer,
@@ -969,6 +981,12 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                 excessTypes);
       }
       firstOne = false;
+      if (cur == null) {
+        LOG.warn("No excess replica can be found. excessTypes: {}." +
+            " moreThanOne: {}. exactlyOne: {}.", excessTypes, moreThanOne,
+            exactlyOne);
+        break;
+      }
 
       // adjust rackmap, moreThanOne, and exactlyOne
       adjustSetsWithChosenReplica(rackMap, moreThanOne, exactlyOne, cur);

+ 70 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java

@@ -54,16 +54,79 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
     super.initialize(conf, stats, clusterMap, host2datanodeMap);
   }
 
-  /** choose local node of localMachine as the target.
-   * if localMachine is not available, choose a node on the same nodegroup or 
-   * rack instead.
+  /**
+   * Choose all available favored nodes as targets.
+   * If there are not enough targets, choose one replica from
+   * each unselected favored node's node group.
+   * @throws NotEnoughReplicasException
+   */
+  @Override
+  protected void chooseFavouredNodes(String src, int numOfReplicas,
+      List<DatanodeDescriptor> favoredNodes,
+      Set<Node> favoriteAndExcludedNodes, long blocksize,
+      int maxNodesPerRack, List<DatanodeStorageInfo> results,
+      boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes)
+      throws NotEnoughReplicasException {
+    super.chooseFavouredNodes(src, numOfReplicas, favoredNodes,
+        favoriteAndExcludedNodes, blocksize, maxNodesPerRack, results,
+        avoidStaleNodes, storageTypes);
+    if (results.size() < numOfReplicas) {
+      // Not enough replicas, choose from the unselected favored nodes' node groups
+      for (int i = 0;
+          i < favoredNodes.size() && results.size() < numOfReplicas; i++) {
+        DatanodeDescriptor favoredNode = favoredNodes.get(i);
+        boolean chosenNode =
+            isNodeChosen(results, favoredNode);
+        if (chosenNode) {
+          continue;
+        }
+        NetworkTopologyWithNodeGroup clusterMapNodeGroup =
+            (NetworkTopologyWithNodeGroup) clusterMap;
+        // try a node on FavouredNode's node group
+        DatanodeStorageInfo target = null;
+        String scope =
+            clusterMapNodeGroup.getNodeGroup(favoredNode.getNetworkLocation());
+        try {
+          target =
+              chooseRandom(scope, favoriteAndExcludedNodes, blocksize,
+                maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+        } catch (NotEnoughReplicasException e) {
+          // catch Exception and continue with other favored nodes
+          continue;
+        }
+        if (target == null) {
+          LOG.warn("Could not find a target for file "
+              + src + " within nodegroup of favored node " + favoredNode);
+          continue;
+        }
+        favoriteAndExcludedNodes.add(target.getDatanodeDescriptor());
+      }
+    }
+  }
+
+  private boolean isNodeChosen(
+      List<DatanodeStorageInfo> results, DatanodeDescriptor favoredNode) {
+    boolean chosenNode = false;
+    for (int j = 0; j < results.size(); j++) {
+      if (results.get(j).getDatanodeDescriptor().equals(favoredNode)) {
+        chosenNode = true;
+        break;
+      }
+    }
+    return chosenNode;
+  }
+
+  /** choose local node of <i>localMachine</i> as the target.
+   * If localMachine is not available, will fallback to nodegroup/rack
+   * when flag <i>fallbackToNodeGroupAndLocalRack</i> is set.
    * @return the chosen node
    */
   @Override
   protected DatanodeStorageInfo chooseLocalStorage(Node localMachine,
       Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
       List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
-      EnumMap<StorageType, Integer> storageTypes, boolean fallbackToLocalRack)
+      EnumMap<StorageType, Integer> storageTypes,
+      boolean fallbackToNodeGroupAndLocalRack)
       throws NotEnoughReplicasException {
     DatanodeStorageInfo localStorage = chooseLocalStorage(localMachine,
         excludedNodes, blocksize, maxNodesPerRack, results,
@@ -72,6 +135,9 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
       return localStorage;
     }
 
+    if (!fallbackToNodeGroupAndLocalRack) {
+      return null;
+    }
     // try a node on local node group
     DatanodeStorageInfo chosenStorage = chooseLocalNodeGroup(
         (NetworkTopologyWithNodeGroup)clusterMap, localMachine, excludedNodes, 
@@ -79,10 +145,6 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
     if (chosenStorage != null) {
       return chosenStorage;
     }
-
-    if (!fallbackToLocalRack) {
-      return null;
-    }
     // try a node on local rack
     return chooseLocalRack(localMachine, excludedNodes, 
         blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes);

+ 15 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java

@@ -30,6 +30,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -310,6 +311,20 @@ public abstract class Storage extends StorageInfo {
       return dirType;
     }    
 
+    /**
+     * Get storage directory size.
+     */
+    public long getDirecorySize() {
+      try {
+        if (!isShared() && root != null && root.exists()) {
+          return FileUtils.sizeOfDirectory(root);
+        }
+      } catch (Exception e) {
+        LOG.warn("Failed to get directory size :" + root, e);
+      }
+      return 0;
+    }
+
     public void read(File from, Storage storage) throws IOException {
       Properties props = readPropertiesFile(from);
       storage.setFieldsFromProperties(props, this);

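getDirecorySize() above delegates to commons-io; a minimal stand-alone sketch of the same pattern, keeping the defensive handling (the sample path is illustrative):

    import java.io.File;
    import org.apache.commons.io.FileUtils;

    public class DirectorySizeSketch {
      /** Returns the recursive size of dir in bytes, or 0 if it cannot be measured. */
      static long sizeOf(File dir) {
        try {
          if (dir != null && dir.exists()) {
            return FileUtils.sizeOfDirectory(dir); // walks the tree recursively
          }
        } catch (Exception e) {
          // Mirror the change above: warn and fall through rather than fail.
          System.err.println("Failed to size " + dir + ": " + e);
        }
        return 0;
      }

      public static void main(String[] args) {
        System.out.println(sizeOf(new File("/tmp/hadoop/dfs/name"))); // illustrative path
      }
    }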
+ 1 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java

@@ -907,15 +907,10 @@ public final class ErasureCodingWorker {
 
       for (int i = 0; i < targetBuffers.length; i++) {
         if (targetBuffers[i] != null) {
-          cleanBuffer(targetBuffers[i]);
+          targetBuffers[i].clear();
         }
       }
     }
-    
-    private ByteBuffer cleanBuffer(ByteBuffer buffer) {
-      Arrays.fill(buffer.array(), (byte) 0);
-      return (ByteBuffer)buffer.clear();
-    }
 
     // send an empty packet to mark the end of the block
     private void endTargetBlocks(boolean[] targetsStatus) {

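The explicit Arrays.fill is dropped above, presumably because the target buffers are fully rewritten before reuse; note that ByteBuffer.clear() by itself only resets position and limit, as this small JDK-only sketch illustrates:

    import java.nio.ByteBuffer;

    public class BufferClearSketch {
      public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(4);
        buf.put((byte) 7).put((byte) 7);
        buf.clear();                    // position=0, limit=capacity; contents untouched
        System.out.println(buf.get(0)); // still prints 7: clear() does not zero the bytes
      }
    }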
+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -1064,6 +1064,8 @@ public class FSImage implements Closeable {
     } finally {
       removeFromCheckpointing(imageTxId);
     }
+    //Update NameDirSize Metric
+    getStorage().updateNameDirSize();
   }
 
   /**
@@ -1244,6 +1246,8 @@ public class FSImage implements Closeable {
     // we won't miss this log segment on a restart if the edits directories
     // go missing.
     storage.writeTransactionIdFileToStorage(getEditLog().getCurSegmentTxId());
+    //Update NameDirSize Metric
+    getStorage().updateNameDirSize();
     return new CheckpointSignature(this);
   }
 

+ 10 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -3729,9 +3729,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     public void run() {
       while (fsRunning && shouldRun) {
         try {
-          FSEditLog editLog = getFSImage().getEditLog();
-          long numEdits =
-              editLog.getLastWrittenTxId() - editLog.getCurSegmentTxId();
+          long numEdits = getTransactionsSinceLastLogRoll();
           if (numEdits > rollThreshold) {
             FSNamesystem.LOG.info("NameNode rolling its own edit log because"
                 + " number of edits in open segment exceeds threshold of "
@@ -6407,6 +6405,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return VersionInfo.getVersion();
   }
 
+  @Override // NameNodeStatusMXBean
+  public String getNameDirSize() {
+    return getFSImage().getStorage().getNNDirectorySize();
+  }
+
   /**
    * Verifies that the given identifier and password are valid and match.
    * @param identifier Token identifier.
@@ -7500,9 +7503,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         sb.append(NamenodeWebHdfsMethods.isWebHdfsInvocation() ? "webhdfs" : "rpc");
         if (isCallerContextEnabled &&
             callerContext != null &&
-            callerContext.isValid() &&
-            (callerContext.getSignature() == null ||
-                callerContext.getSignature().length <= callerSignatureMaxLen)) {
+            callerContext.isContextValid()) {
           sb.append("\t").append("callerContext=");
           if (callerContext.getContext().length() > callerContextMaxLen) {
             sb.append(callerContext.getContext().substring(0,
@@ -7510,7 +7511,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           } else {
             sb.append(callerContext.getContext());
           }
-          if (callerContext.getSignature() != null) {
+          if (callerContext.getSignature() != null &&
+              callerContext.getSignature().length > 0 &&
+              callerContext.getSignature().length <= callerSignatureMaxLen) {
             sb.append(":");
             sb.append(new String(callerContext.getSignature(),
                 CallerContext.SIGNATURE_ENCODING));

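For context, a minimal sketch of how a client might attach a caller context that satisfies the tightened validity check above (an empty context string is now treated as invalid). It assumes the Builder and setCurrent API of the CallerContext class touched earlier in this change set; the context and signature values are illustrative.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.ipc.CallerContext;

    public class CallerContextSketch {
      public static void main(String[] args) {
        // A non-empty context is required for it to appear in the audit log;
        // the signature is optional and only logged when within the size limit.
        CallerContext context = new CallerContext.Builder("myApp_query_42")
            .setSignature("job-0001".getBytes(StandardCharsets.UTF_8))
            .build();
        CallerContext.setCurrent(context);
        // ... subsequent RPCs issued from this thread carry the context ...
      }
    }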
+ 25 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java

@@ -29,6 +29,7 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.CopyOnWriteArrayList;
@@ -52,6 +53,7 @@ import org.apache.hadoop.hdfs.util.PersistentLongFile;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.util.Time;
+import org.mortbay.util.ajax.JSON;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -148,6 +150,11 @@ public class NNStorage extends Storage implements Closeable,
    */
   private HashMap<String, String> deprecatedProperties;
 
+  /**
+   * Name directories size for metric.
+   */
+  private Map<String, Long> nameDirSizeMap = new HashMap<>();
+
   /**
    * Construct the NNStorage.
    * @param conf Namenode configuration.
@@ -166,6 +173,8 @@ public class NNStorage extends Storage implements Closeable,
     setStorageDirectories(imageDirs, 
                           Lists.newArrayList(editsDirs),
                           FSNamesystem.getSharedEditsDirs(conf));
+    //Update NameDirSize metric value after NN start
+    updateNameDirSize();
   }
 
   @Override // Storage
@@ -1075,4 +1084,20 @@ public class NNStorage extends Storage implements Closeable,
         getBlockPoolID(),
         getCTime());
   }
+
+  public String getNNDirectorySize() {
+    return JSON.toString(nameDirSizeMap);
+  }
+
+  public void updateNameDirSize() {
+    Map<String, Long> nnDirSizeMap = new HashMap<>();
+    for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
+      StorageDirectory sd = it.next();
+      if (!sd.isShared()) {
+        nnDirSizeMap.put(sd.getRoot().getAbsolutePath(), sd.getDirecorySize());
+      }
+    }
+    nameDirSizeMap.clear();
+    nameDirSizeMap.putAll(nnDirSizeMap);
+  }
 }

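The new NameDirSize metric is a JSON string mapping each non-shared name directory to its size in bytes; a minimal sketch of the serialization used above, with illustrative paths and sizes:

    import java.util.HashMap;
    import java.util.Map;
    import org.mortbay.util.ajax.JSON;

    public class NameDirSizeJsonSketch {
      public static void main(String[] args) {
        Map<String, Long> nameDirSizeMap = new HashMap<>();
        nameDirSizeMap.put("/data/1/dfs/name", 123456L); // illustrative
        nameDirSizeMap.put("/data/2/dfs/name", 123800L); // illustrative
        // Same serialization as NNStorage#getNNDirectorySize(),
        // e.g. {"/data/1/dfs/name":123456,"/data/2/dfs/name":123800}
        System.out.println(JSON.toString(nameDirSizeMap));
      }
    }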
+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java

@@ -272,4 +272,9 @@ public interface NameNodeMXBean {
    */
   public Map<String, Integer> getDistinctVersions();
   
+  /**
+   * Get namenode directory size.
+   */
+  String getNameDirSize();
+
 }

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java

@@ -372,6 +372,8 @@ public class EditLogTailer {
           } finally {
             namesystem.cpUnlock();
           }
+          //Update NameDirSize Metric
+          namesystem.getFSImage().getStorage().updateNameDirSize();
         } catch (EditLogInputException elie) {
           LOG.warn("Error while reading edits from disk. Will try again.", elie);
         } catch (InterruptedException ie) {
@@ -463,4 +465,4 @@ public class EditLogTailer {
       return cachedActiveProxy;
     }
   }
-}
+}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -2117,6 +2117,28 @@ public class MiniDFSCluster {
     getMaterializedReplica(i, blk).corruptMeta();
   }
 
+  /**
+   * Corrupt the metadata of a block by deleting it.
+   * @param i index of the datanode
+   * @param blk name of the block.
+   */
+  public void deleteMeta(int i, ExtendedBlock blk)
+      throws IOException {
+    getMaterializedReplica(i, blk).deleteMeta();
+  }
+
+  /**
+   * Corrupt the metadata of a block by truncating it to a new size.
+   * @param i index of the datanode.
+   * @param blk name of the block.
+   * @param newSize the new size of the metadata file.
+   * @throws IOException if any I/O errors.
+   */
+  public void truncateMeta(int i, ExtendedBlock blk, int newSize)
+      throws IOException {
+    getMaterializedReplica(i, blk).truncateMeta(newSize);
+  }
+
   public boolean changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
       long newGenStamp) throws IOException {
     File blockFile = getBlockFile(dnIndex, blk);

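A minimal sketch of how a test might use the new helpers to corrupt block metadata; it assumes a running MiniDFSCluster and an ExtendedBlock obtained elsewhere, and the truncation size is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class MetaCorruptionSketch {
      /** Corrupt the metadata of two replicas of the same block in different ways. */
      static void corruptMeta(MiniDFSCluster cluster, ExtendedBlock block)
          throws IOException {
        cluster.deleteMeta(0, block);       // datanode 0: delete the meta file outright
        cluster.truncateMeta(1, block, 54); // datanode 1: truncate the meta file
      }
    }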
+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java

@@ -61,10 +61,10 @@ public class StripedFileTestUtil {
   public static final int BLOCK_STRIPED_CELL_SIZE = 64 * 1024;
   public static final int BLOCK_STRIPE_SIZE = BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS;
 
-  static final int stripesPerBlock = 4;
-  static final int blockSize = BLOCK_STRIPED_CELL_SIZE * stripesPerBlock;
-  static final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2;
-  static final int BLOCK_GROUP_SIZE = blockSize * NUM_DATA_BLOCKS;
+  public static final int stripesPerBlock = 4;
+  public static final int blockSize = BLOCK_STRIPED_CELL_SIZE * stripesPerBlock;
+  public static final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2;
+  public static final int BLOCK_GROUP_SIZE = blockSize * NUM_DATA_BLOCKS;
 
 
   static byte[] generateBytes(int cnt) {

+ 1657 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java

@@ -0,0 +1,1657 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.io.Writer;
+import java.net.URI;
+import java.security.NoSuchAlgorithmException;
+import java.security.PrivilegedAction;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.server.KMSConfiguration;
+import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider;
+import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * This class tests the ACLs system through the full code path.  It overlaps
+ * slightly with the ACL tests in common, but the approach is more holistic.
+ *
+ * <b>NOTE:</b> Because of the mechanics of JAXP, when the KMS config files are
+ * written to disk, a config param with a blank value ("") will be written in a
+ * way that the KMS will read as unset, which is different from blank. For this
+ * reason, when testing the effects of blank config params, this test class
+ * sets the values of those config params to a space (" ").  A whitespace value
+ * will be preserved by JAXP when writing out the config files and will be
+ * interpreted by KMS as a blank value. (The KMS strips whitespace from ACL
+ * values before interpreting them.)
+ */
+public class TestAclsEndToEnd {
+  private static final Log LOG =
+      LogFactory.getLog(TestAclsEndToEnd.class.getName());
+  private static final String TEXT =
+      "The blue zone is for loading and unloading only. "
+      + "Please park in the red zone.";
+  private static final Path ZONE1 = new Path("/tmp/BLUEZONE");
+  private static final Path ZONE2 = new Path("/tmp/REDZONE");
+  private static final Path ZONE3 = new Path("/tmp/LOADINGZONE");
+  private static final Path ZONE4 = new Path("/tmp/UNLOADINGZONE");
+  private static final Path FILE1 = new Path(ZONE1, "file1");
+  private static final Path FILE1A = new Path(ZONE1, "file1a");
+  private static final Path FILE2 = new Path(ZONE2, "file2");
+  private static final Path FILE3 = new Path(ZONE3, "file3");
+  private static final Path FILE4 = new Path(ZONE4, "file4");
+  private static final String KEY1 = "key1";
+  private static final String KEY2 = "key2";
+  private static final String KEY3 = "key3";
+  private static UserGroupInformation realUgi;
+  private static String realUser;
+
+  private MiniKMS miniKMS;
+  private File kmsDir;
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+
+  @BeforeClass
+  public static void captureUser() throws IOException {
+    realUgi = UserGroupInformation.getCurrentUser();
+    realUser = System.getProperty("user.name");
+  }
+
+  /**
+   * Extract the URI for the miniKMS.
+   *
+   * @return the URI for the miniKMS
+   */
+  private String getKeyProviderURI() {
+    return KMSClientProvider.SCHEME_NAME + "://" +
+        miniKMS.getKMSUrl().toExternalForm().replace("://", "@");
+  }
+
+  /**
+   * Write out the config files needed by the miniKMS.  The miniKMS doesn't
+   * provide a way to set the configs directly, so the only way to pass config
+   * parameters is to write them out into config files.
+   *
+   * @param confDir the directory into which to write the configs
+   * @param conf the config to write.
+   * @throws IOException
+   */
+  private void writeConf(File confDir, Configuration conf)
+      throws IOException {
+    URI keystore = new Path(kmsDir.getAbsolutePath(), "kms.keystore").toUri();
+
+    conf.set(KMSConfiguration.KEY_PROVIDER_URI, "jceks://file@" + keystore);
+    conf.set("hadoop.kms.authentication.type", "simple");
+
+    Writer writer =
+        new FileWriter(new File(confDir, KMSConfiguration.KMS_SITE_XML));
+    conf.writeXml(writer);
+    writer.close();
+
+    writer = new FileWriter(new File(confDir, KMSConfiguration.KMS_ACLS_XML));
+    conf.writeXml(writer);
+    writer.close();
+
+    //create empty core-site.xml
+    writer = new FileWriter(new File(confDir, "core-site.xml"));
+    new Configuration(false).writeXml(writer);
+    writer.close();
+  }
+
+  /**
+   * Setup a fresh miniKMS and miniDFS.
+   *
+   * @param conf the configuration to use for both the miniKMS and miniDFS
+   * @throws Exception thrown if setup fails
+   */
+  private void setup(Configuration conf) throws Exception {
+    setup(conf, true, true);
+  }
+
+  /**
+   * Setup a fresh miniDFS and a miniKMS.  The resetKms parameter controls
+   * whether the miniKMS will start fresh or reuse the existing data.
+   *
+   * @param conf the configuration to use for both the miniKMS and miniDFS
+   * @param resetKms whether to start a fresh miniKMS
+   * @throws Exception thrown if setup fails
+   */
+  private void setup(Configuration conf, boolean resetKms) throws Exception {
+    setup(conf, resetKms, true);
+  }
+
+  /**
+   * Setup a miniDFS and miniKMS.  The resetKms and resetDfs parameters control
+   * whether the services will start fresh or reuse the existing data.
+   *
+   * @param conf the configuration to use for both the miniKMS and miniDFS
+   * @param resetKms whether to start a fresh miniKMS
+   * @param resetDfs whether to start a fresh miniDFS
+   * @throws Exception thrown if setup fails
+   */
+  private void setup(Configuration conf, boolean resetKms, boolean resetDfs)
+          throws Exception {
+    if (resetKms) {
+      FileSystemTestHelper fsHelper = new FileSystemTestHelper();
+
+      kmsDir = new File(fsHelper.getTestRootDir()).getAbsoluteFile();
+
+      Assert.assertTrue(kmsDir.mkdirs());
+    }
+
+    writeConf(kmsDir, conf);
+
+    MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder();
+
+    miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build();
+    miniKMS.start();
+
+    conf = new HdfsConfiguration();
+
+    // Set up java key store
+    conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER + "." + realUser + ".users",
+        "keyadmin,hdfs,user");
+    conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER + "." + realUser + ".hosts",
+        "*");
+    conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
+        getKeyProviderURI());
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
+        true);
+
+    MiniDFSCluster.Builder clusterBuilder = new MiniDFSCluster.Builder(conf);
+
+    cluster = clusterBuilder.numDataNodes(1).format(resetDfs).build();
+    fs = cluster.getFileSystem();
+  }
+
+  /**
+   * Stop the miniKMS and miniDFS.
+   */
+  private void teardown() {
+    // Restore login user
+    UserGroupInformation.setLoginUser(realUgi);
+
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+
+    miniKMS.stop();
+  }
+
+  /**
+   * Return a new {@link Configuration} with KMS ACLs appropriate to pass the
+   * full ACL test in {@link #doFullAclTest()} set.
+   *
+   * @param hdfsUgi the hdfs user
+   * @param keyadminUgi the keyadmin user
+   * @return the configuration
+   */
+  private static Configuration getBaseConf(UserGroupInformation hdfsUgi,
+      UserGroupInformation keyadminUgi) {
+    Configuration conf = new Configuration();
+
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        keyadminUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE",
+        keyadminUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.ROLLOVER",
+        keyadminUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET", " ");
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_KEYS",
+        keyadminUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        hdfsUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.SET_KEY_MATERIAL", " ");
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        hdfsUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK", "*");
+
+    return conf;
+  }
+
+  /**
+   * Set the recommended blacklist ACLs.
+   *
+   * @param conf the configuration
+   * @param hdfsUgi the hdfs user
+   */
+  private static void setBlacklistAcls(Configuration conf,
+      UserGroupInformation hdfsUgi) {
+
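+    // Blacklist entries take precedence over the KMS ACLs, so the hdfs user is
+    // denied these operations even where an ACL would otherwise allow them.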
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.CREATE",
+        hdfsUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.DELETE",
+        hdfsUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.ROLLOVER",
+        hdfsUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.GET", "*");
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.SET_KEY_MATERIAL",
+        "*");
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.DECRYPT_EEK",
+        hdfsUgi.getUserName());
+  }
+
+  /**
+   * Set the key ACLs appropriate to pass the full ACL test in
+   * {@link #doFullAclTest()} using the specified prefix.  The prefix should
+   * either be "whitelist.key.acl." or "key.acl.key1.".
+   *
+   * @param conf the configuration
+   * @param prefix the ACL prefix
+   * @param hdfsUgi the hdfs user
+   * @param keyadminUgi the keyadmin user
+   * @param userUgi the normal user
+   */
+  private static void setKeyAcls(Configuration conf, String prefix,
+      UserGroupInformation hdfsUgi,
+      UserGroupInformation keyadminUgi,
+      UserGroupInformation userUgi) {
+
+    conf.set(prefix + "MANAGEMENT", keyadminUgi.getUserName());
+    conf.set(prefix + "READ", hdfsUgi.getUserName());
+    conf.set(prefix + "GENERATE_EEK", hdfsUgi.getUserName());
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".DECRYPT_EEK",
+        userUgi.getUserName());
+  }
+
+  /**
+   * Test the full life cycle of a key using a config with whitelist key ACLs.
+   * The configuration used is the correct configuration to pass the full ACL
+   * test in {@link #doFullAclTest()}.
+   *
+   * @throws Exception thrown on test failure
+   */
+  @Test
+  public void testGoodWithWhitelist() throws Exception {
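+    // Proxy users stand in for the hdfs superuser, the key administrator, and
+    // an ordinary end user, all backed by the real test user.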
+    UserGroupInformation hdfsUgi =
+        UserGroupInformation.createProxyUserForTesting("hdfs",
+          realUgi, new String[] {"supergroup"});
+    UserGroupInformation keyadminUgi =
+        UserGroupInformation.createProxyUserForTesting("keyadmin",
+          realUgi, new String[] {"keyadmin"});
+    UserGroupInformation userUgi =
+        UserGroupInformation.createProxyUserForTesting("user",
+          realUgi,  new String[] {"staff"});
+
+    Configuration conf = getBaseConf(hdfsUgi, keyadminUgi);
+
+    setBlacklistAcls(conf, hdfsUgi);
+    setKeyAcls(conf, KMSConfiguration.WHITELIST_KEY_ACL_PREFIX,
+        hdfsUgi, keyadminUgi, userUgi);
+    doFullAclTest(conf, hdfsUgi, keyadminUgi, userUgi);
+  }
+
+  /**
+   * Test the full life cycle of a key using a config with key ACLs.
+   * The configuration used is the correct configuration to pass the full ACL
+   * test in {@link #doFullAclTest()}.
+   *
+   * @throws Exception thrown on test failure
+   */
+  @Test
+  public void testGoodWithKeyAcls() throws Exception {
+    UserGroupInformation hdfsUgi =
+        UserGroupInformation.createProxyUserForTesting("hdfs",
+          realUgi, new String[] {"supergroup"});
+    UserGroupInformation keyadminUgi =
+        UserGroupInformation.createProxyUserForTesting("keyadmin",
+          realUgi, new String[] {"keyadmin"});
+    UserGroupInformation userUgi =
+        UserGroupInformation.createProxyUserForTesting("user",
+          realUgi,  new String[] {"staff"});
+    Configuration conf = getBaseConf(hdfsUgi, keyadminUgi);
+
+    setBlacklistAcls(conf, hdfsUgi);
+    setKeyAcls(conf, KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".",
+        hdfsUgi, keyadminUgi, userUgi);
+    doFullAclTest(conf, hdfsUgi, keyadminUgi, userUgi);
+  }
+
+  /**
+   * Test the full life cycle of a key using a config with whitelist key ACLs
+   * and without blacklist ACLs.  The configuration used is the correct
+   * configuration to pass the full ACL test in {@link #doFullAclTest()}.
+   *
+   * @throws Exception thrown on test failure
+   */
+  @Test
+  public void testGoodWithWhitelistWithoutBlacklist() throws Exception {
+    UserGroupInformation hdfsUgi =
+        UserGroupInformation.createProxyUserForTesting("hdfs",
+          realUgi, new String[] {"supergroup"});
+    UserGroupInformation keyadminUgi =
+        UserGroupInformation.createProxyUserForTesting("keyadmin",
+          realUgi, new String[] {"keyadmin"});
+    UserGroupInformation userUgi =
+        UserGroupInformation.createProxyUserForTesting("user",
+          realUgi,  new String[] {"staff"});
+    Configuration conf = getBaseConf(hdfsUgi, keyadminUgi);
+
+    setKeyAcls(conf, KMSConfiguration.WHITELIST_KEY_ACL_PREFIX,
+        hdfsUgi, keyadminUgi, userUgi);
+    doFullAclTest(conf, hdfsUgi, keyadminUgi, userUgi);
+  }
+
+  /**
+   * Test the full life cycle of a key using a config with key ACLs
+   * and without blacklist ACLs. The configuration used is the correct
+   * configuration to pass the full ACL test in {@link #doFullAclTest()}.
+   *
+   * @throws Exception thrown on test failure
+   */
+  @Test
+  public void testGoodWithKeyAclsWithoutBlacklist() throws Exception {
+    UserGroupInformation hdfsUgi =
+        UserGroupInformation.createProxyUserForTesting("hdfs",
+          realUgi, new String[] {"supergroup"});
+    UserGroupInformation keyadminUgi =
+        UserGroupInformation.createProxyUserForTesting("keyadmin",
+          realUgi, new String[] {"keyadmin"});
+    UserGroupInformation userUgi =
+        UserGroupInformation.createProxyUserForTesting("user",
+          realUgi,  new String[] {"staff"});
+    Configuration conf = getBaseConf(hdfsUgi, keyadminUgi);
+
+    setKeyAcls(conf, KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".",
+        hdfsUgi, keyadminUgi, userUgi);
+    doFullAclTest(conf, hdfsUgi, keyadminUgi, userUgi);
+  }
+
+  /**
+   * Run a full key life cycle test using the provided configuration and users.
+   *
+   * @param conf the configuration
+   * @param hdfsUgi the user to use as the hdfs user
+   * @param keyadminUgi the user to use as the keyadmin user
+   * @param userUgi the user to use as the normal user
+   * @throws Exception thrown if there is a test failure
+   */
+  private void doFullAclTest(final Configuration conf,
+      final UserGroupInformation hdfsUgi,
+      final UserGroupInformation keyadminUgi,
+      final UserGroupInformation userUgi) throws Exception {
+
+    try {
+      setup(conf);
+
+      // Create a test key
+      assertTrue("Exception during creation of key " + KEY1 + " by "
+          + keyadminUgi.getUserName(), createKey(keyadminUgi, KEY1, conf));
+
+      // Fail to create a test key
+      assertFalse("Allowed creation of key " + KEY2 + " by "
+          + hdfsUgi.getUserName(), createKey(hdfsUgi, KEY2, conf));
+      assertFalse("Allowed creation of key " + KEY2 + " by "
+          + userUgi.getUserName(), createKey(userUgi, KEY2, conf));
+
+      // Create a directory and chown it to the normal user.
+      fs.mkdirs(ZONE1);
+      fs.setOwner(ZONE1, userUgi.getUserName(),
+          userUgi.getPrimaryGroupName());
+
+      // Create an EZ
+      assertTrue("Exception during creation of EZ " + ZONE1 + " by "
+          + hdfsUgi.getUserName() + " using key " + KEY1,
+            createEncryptionZone(hdfsUgi, KEY1, ZONE1));
+
+      // Fail to create an EZ
+      assertFalse("Allowed creation of EZ " + ZONE2 + " by "
+          + keyadminUgi.getUserName() + " using key " + KEY1,
+            createEncryptionZone(keyadminUgi, KEY1, ZONE2));
+      assertFalse("Allowed creation of EZ " + ZONE2 + " by "
+          + userUgi.getUserName() + " using key " + KEY1,
+            createEncryptionZone(userUgi, KEY1, ZONE2));
+
+      // Create a file in the zone
+      assertTrue("Exception during creation of file " + FILE1 + " by "
+          + userUgi.getUserName(), createFile(userUgi, FILE1, TEXT));
+
+      // Fail to create a file in the zone
+      assertFalse("Allowed creation of file " + FILE1A + " by "
+          + hdfsUgi.getUserName(), createFile(hdfsUgi, FILE1A, TEXT));
+      assertFalse("Allowed creation of file " + FILE1A + " by "
+          + keyadminUgi.getUserName(), createFile(keyadminUgi, FILE1A, TEXT));
+
+      // Read a file in the zone
+      assertTrue("Exception while reading file " + FILE1 + " by "
+          + userUgi.getUserName(), compareFile(userUgi, FILE1, TEXT));
+
+      // Fail to read a file in the zone
+      assertFalse("Allowed reading of file " + FILE1 + " by "
+          + hdfsUgi.getUserName(), compareFile(hdfsUgi, FILE1, TEXT));
+      assertFalse("Allowed reading of file " + FILE1 + " by "
+          + keyadminUgi.getUserName(), compareFile(keyadminUgi, FILE1, TEXT));
+
+      // Remove the zone
+      fs.delete(ZONE1, true);
+
+      // Fail to remove the key
+      assertFalse("Allowed deletion of file " + FILE1 + " by "
+          + hdfsUgi.getUserName(), deleteKey(hdfsUgi, KEY1));
+      assertFalse("Allowed deletion of file " + FILE1 + " by "
+          + userUgi.getUserName(), deleteKey(userUgi, KEY1));
+
+      // Remove
+      assertTrue("Exception during deletion of file " + FILE1 + " by "
+          + keyadminUgi.getUserName(), deleteKey(keyadminUgi, KEY1));
+    } finally {
+      fs.delete(ZONE1, true);
+      fs.delete(ZONE2, true);
+      teardown();
+    }
+  }
+
+  /**
+   * Test that key creation is correctly governed by ACLs.
+   * @throws Exception thrown if setup fails
+   */
+  @Test
+  public void testCreateKey() throws Exception {
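+    // Each block below starts a fresh miniKMS and miniDFS with a different
+    // ACL configuration and verifies whether key creation is allowed.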
+    Configuration conf = new Configuration();
+
+    // Correct config with whitelist ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation with correct config"
+          + " using whitelist key ACLs", createKey(realUgi, KEY1, conf));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Correct config with default ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation with correct config"
+          + " using default key ACLs", createKey(realUgi, KEY2, conf));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of blacklist
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertFalse("Allowed key creation with blacklist for CREATE",
+          createKey(realUgi, KEY3, conf));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE", " ");
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertFalse("Allowed key creation without CREATE KMS ACL",
+          createKey(realUgi, KEY3, conf));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing key ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertFalse("Allowed key creation without MANAGMENT key ACL",
+          createKey(realUgi, KEY3, conf));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because the presence of a key-specific ACL for key3 causes the
+    // default key ACL to be ignored
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + KEY3 + ".DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertFalse("Allowed key creation when default key ACL should have been"
+          + " overridden by key ACL", createKey(realUgi, KEY3, conf));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Allowed because the default setting for KMS ACLs is fully permissive
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation with default KMS ACLs",
+          createKey(realUgi, KEY3, conf));
+    } finally {
+      teardown();
+    }
+  }
+
+  /**
+   * Test that zone creation is correctly governed by ACLs.
+   * @throws Exception thrown if setup fails
+   */
+  @Test
+  public void testCreateEncryptionZone() throws Exception {
+    Configuration conf = new Configuration();
+
+    // Create a test key
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation",
+          createKey(realUgi, KEY1, conf));
+    } finally {
+      teardown();
+    }
+
+    // We tear everything down and then restart it with the ACLs we want to
+    // test so that there's no contamination from the ACLs needed for setup.
+    // To make that work, we have to tell the setup() method not to create a
+    // new KMS directory.
+    conf = new Configuration();
+
+    // Correct config with whitelist ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE1);
+
+      assertTrue("Exception during zone creation with correct config using"
+          + " whitelist key ACLs", createEncryptionZone(realUgi, KEY1, ZONE1));
+    } finally {
+      fs.delete(ZONE1, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Correct config with default ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE2);
+
+      assertTrue("Exception during zone creation with correct config using"
+          + " default key ACLs", createEncryptionZone(realUgi, KEY1, ZONE2));
+    } finally {
+      fs.delete(ZONE2, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because the presence of a key-specific ACL for key1 causes the
+    // default key ACL to be ignored
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE3);
+
+      assertFalse("Allowed creation of zone when default key ACLs should have"
+          + " been overridden by key ACL",
+            createEncryptionZone(realUgi, KEY1, ZONE3));
+    } finally {
+      fs.delete(ZONE3, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Correct config with blacklist
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE3);
+
+      assertFalse("Allowed zone creation of zone with blacklisted GET_METADATA",
+          createEncryptionZone(realUgi, KEY1, ZONE3));
+    } finally {
+      fs.delete(ZONE3, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Correct config with blacklist
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE3);
+
+      assertFalse("Allowed zone creation of zone with blacklisted GENERATE_EEK",
+          createEncryptionZone(realUgi, KEY1, ZONE3));
+    } finally {
+      fs.delete(ZONE3, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing KMS ACL but works because defaults for KMS ACLs are fully
+    // permissive
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE3);
+
+      assertTrue("Exception during zone creation with default KMS ACLs",
+          createEncryptionZone(realUgi, KEY1, ZONE3));
+    } finally {
+      fs.delete(ZONE3, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing GET_METADATA KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA", " ");
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE4);
+
+      assertFalse("Allowed zone creation without GET_METADATA KMS ACL",
+          createEncryptionZone(realUgi, KEY1, ZONE4));
+    } finally {
+      fs.delete(ZONE4, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing GENERATE_EEK KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK", " ");
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE4);
+
+      assertFalse("Allowed zone creation without GENERATE_EEK KMS ACL",
+          createEncryptionZone(realUgi, KEY1, ZONE4));
+    } finally {
+      fs.delete(ZONE4, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing READ key ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE4);
+
+      assertFalse("Allowed zone creation without READ ACL",
+          createEncryptionZone(realUgi, KEY1, ZONE4));
+    } finally {
+      fs.delete(ZONE4, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing GENERATE_EEK key ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE4);
+
+      assertFalse("Allowed zone creation without GENERATE_EEK ACL",
+          createEncryptionZone(realUgi, KEY1, ZONE4));
+    } finally {
+      fs.delete(ZONE4, true);
+      teardown();
+    }
+  }
+
+  /**
+   * Test that in-zone file creation is correctly governed by ACLs.
+   * @throws Exception thrown if setup fails
+   */
+  @Test
+  public void testCreateFileInEncryptionZone() throws Exception {
+    Configuration conf = new Configuration();
+
+    // Create a test key
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
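+    // Sanity check: the KMS keystore should start out empty.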
+    assertTrue(new File(kmsDir, "kms.keystore").length() == 0);
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation",
+          createKey(realUgi, KEY1, conf));
+      fs.mkdirs(ZONE1);
+      assertTrue("Exception during zone creation",
+          createEncryptionZone(realUgi, KEY1, ZONE1));
+      fs.mkdirs(ZONE2);
+      assertTrue("Exception during zone creation",
+          createEncryptionZone(realUgi, KEY1, ZONE2));
+      fs.mkdirs(ZONE3);
+      assertTrue("Exception during zone creation",
+          createEncryptionZone(realUgi, KEY1, ZONE3));
+      fs.mkdirs(ZONE4);
+      assertTrue("Exception during zone creation",
+          createEncryptionZone(realUgi, KEY1, ZONE4));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+      fs.delete(ZONE2, true);
+      fs.delete(ZONE3, true);
+      fs.delete(ZONE4, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    // We tear everything down and then restart it with the ACLs we want to
+    // test so that there's no contamination from the ACLs needed for setup.
+    // To make that work, we have to tell the setup() method not to create a
+    // new KMS directory or DFS directory.
+
+    conf = new Configuration();
+
+    // Correct config with whitelist ACLs
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertTrue("Exception during file creation with correct config"
+          + " using whitelist ACL", createFile(realUgi, FILE1, TEXT));
+    } finally {
+      fs.delete(ZONE1, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Correct config with default ACLs
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertTrue("Exception during file creation with correct config"
+          + " using whitelist ACL", createFile(realUgi, FILE2, TEXT));
+    } finally {
+      fs.delete(ZONE2, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because the presence of a key-specific ACL for key1 causes the
+    // default key ACL to be ignored
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".READ",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation when default key ACLs should have been"
+          + " overridden by key ACL", createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied by blacklist
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation with blacklist for GENERATE_EEK",
+          createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied by blacklist
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation with blacklist for DECRYPT_EEK",
+          createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Allowed because default KMS ACLs are fully permissive
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertTrue("Exception during file creation with default KMS ACLs",
+          createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of missing GENERATE_EEK KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK", " ");
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation without GENERATE_EEK KMS ACL",
+          createFile(realUgi, FILE4, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of missing DECRYPT_EEK KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK", " ");
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation without DECRYPT_EEK KMS ACL",
+          createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of missing GENERATE_EEK key ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation without GENERATE_EEK key ACL",
+          createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of missing DECRYPT_EEK key ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation without DECRYPT_EEK key ACL",
+          createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+  }
+
+  /**
+   * Test that in-zone file read is correctly governed by ACLs.
+   * @throws Exception thrown if setup fails
+   */
+  @Test
+  public void testReadFileInEncryptionZone() throws Exception {
+    Configuration conf = new Configuration();
+
+    // Create a test key
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
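+    // Sanity check: the KMS keystore should start out empty.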
+    assertTrue(new File(kmsDir, "kms.keystore").length() == 0);
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation",
+          createKey(realUgi, KEY1, conf));
+      fs.mkdirs(ZONE1);
+      assertTrue("Exception during zone creation",
+          createEncryptionZone(realUgi, KEY1, ZONE1));
+      assertTrue("Exception during file creation",
+              createFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    // We tear everything down and then restart it with the ACLs we want to
+    // test so that there's no contamination from the ACLs needed for setup.
+    // To make that work, we have to tell the setup() method not to create a
+    // new KMS directory or DFS directory.
+
+    conf = new Configuration();
+
+    // Correct config with whitelist ACLs
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertTrue("Exception while reading file with correct config with"
+          + " whitelist ACLs", compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Correct config with default ACLs
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertTrue("Exception while reading file with correct config"
+          + " with default ACLs", compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because the presence of a key-specific ACL for key1 causes the
+    // default key ACL to be ignored
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".READ",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file read when default key ACLs should have been"
+          + " overridden by key ACL", compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied by blacklist
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file read with blacklist for DECRYPT_EEK",
+          compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Allowed because default KMS ACLs are fully permissive
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertTrue("Exception while reading file with default KMS ACLs",
+          compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of missing DECRYPT_EEK KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK", " ");
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file read without DECRYPT_EEK KMS ACL",
+          compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    // Denied because of missing DECRYPT_EEK key ACL
+    conf = new Configuration();
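+    // With an entirely default configuration the KMS ACLs are permissive, but
+    // no key ACL grants DECRYPT_EEK, so the read should be denied.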
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file read without DECRYPT_EEK key ACL",
+          compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+  }
+
+  /**
+   * Test that key deletion is correctly governed by ACLs.
+   * @throws Exception thrown if setup fails
+   */
+  @Test
+  public void testDeleteKey() throws Exception {
+    Configuration conf = new Configuration();
+
+    // Create a test key
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation",
+          createKey(realUgi, KEY1, conf));
+      assertTrue("Exception during key creation",
+          createKey(realUgi, KEY2, conf));
+      assertTrue("Exception during key creation",
+          createKey(realUgi, KEY3, conf));
+    } finally {
+      teardown();
+    }
+
+    // We tear everything down and then restart it with the ACLs we want to
+    // test so that there's no contamination from the ACLs needed for setup.
+    // To make that work, we have to tell the setup() method not to create a
+    // new KMS directory.
+
+    conf = new Configuration();
+
+    // Correct config with whitelist ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertTrue("Exception during key deletion with correct config"
+          + " using whitelist key ACLs", deleteKey(realUgi, KEY1));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Correct config with default ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertTrue("Exception during key deletion with correct config"
+          + " using default key ACLs", deleteKey(realUgi, KEY2));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of blacklist
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.DELETE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertFalse("Allowed key deletion with blacklist for DELETE",
+          deleteKey(realUgi, KEY3));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE", " ");
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertFalse("Allowed key deletion without DELETE KMS ACL",
+          deleteKey(realUgi, KEY3));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing key ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertFalse("Allowed key deletion without MANAGMENT key ACL",
+          deleteKey(realUgi, KEY3));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because the presence of a key-specific ACL for key3 causes the
+    // default key ACL to be ignored
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + KEY3 + ".DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertFalse("Allowed key deletion when default key ACL should have been"
+          + " overridden by key ACL", deleteKey(realUgi, KEY3));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Allowed because the default setting for KMS ACLs is fully permissive
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertTrue("Exception during key deletion with default KMS ACLs",
+          deleteKey(realUgi, KEY3));
+    } finally {
+      teardown();
+    }
+  }
+
+  /**
+   * Create a key as the specified user.
+   *
+   * @param ugi the target user
+   * @param key the target key
+   * @param conf the configuration
+   * @return whether the key creation succeeded
+   */
+  private boolean createKey(UserGroupInformation ugi, final String key,
+      final Configuration conf) {
+
+    return doUserOp(ugi, new UserOp() {
+      @Override
+      public void execute() throws IOException {
+        try {
+          DFSTestUtil.createKey(key, cluster, conf);
+        } catch (NoSuchAlgorithmException ex) {
+          throw new IOException(ex);
+        }
+      }
+    });
+  }
+
+  /**
+   * Create a zone as the specified user.
+   *
+   * @param ugi the target user
+   * @param key the target key
+   * @param zone the target zone
+   * @return whether the zone creation succeeded
+   */
+  private boolean createEncryptionZone(UserGroupInformation ugi,
+      final String key, final Path zone) {
+
+    return doUserOp(ugi, new UserOp() {
+      @Override
+      public void execute() throws IOException {
+        cluster.getFileSystem().createEncryptionZone(zone, key);
+      }
+    });
+  }
+
+  /**
+   * Create a file as the specified user.
+   *
+   * @param ugi the target user
+   * @param file the target file
+   * @param text the target file contents
+   * @return whether the file creation succeeded
+   */
+  private boolean createFile(UserGroupInformation ugi,
+      final Path file, final String text) {
+
+    return doUserOp(ugi, new UserOp() {
+      @Override
+      public void execute() throws IOException {
+        FSDataOutputStream dout = cluster.getFileSystem().create(file);
+        PrintWriter out = new PrintWriter(new OutputStreamWriter(dout));
+
+        out.println(text);
+        out.close();
+      }
+    });
+  }
+
+  /**
+   * Read a file as the specified user and compare the contents to expectations.
+   *
+   * @param ugi the target user
+   * @param file the target file
+   * @param text the expected file contents
+   * @return true if the file read succeeded and the contents were as expected
+   */
+  private boolean compareFile(UserGroupInformation ugi,
+      final Path file, final String text) {
+
+    return doUserOp(ugi, new UserOp() {
+      @Override
+      public void execute() throws IOException {
+        FSDataInputStream din =  cluster.getFileSystem().open(file);
+        BufferedReader in = new BufferedReader(new InputStreamReader(din));
+
+        assertEquals("The text read does not match the text written",
+            text, in.readLine());
+      }
+    });
+  }
+
+  /**
+   * Delete a key as the specified user.
+   *
+   * @param ugi the target user
+   * @param key the target key
+   * @return whether the key deletion succeeded
+   */
+  private boolean deleteKey(UserGroupInformation ugi, final String key)
+      throws IOException, InterruptedException {
+
+    return doUserOp(ugi, new UserOp() {
+      @Override
+      public void execute() throws IOException {
+        cluster.getNameNode().getNamesystem().getProvider().deleteKey(key);
+      }
+    });
+  }
+
+  /**
+   * Perform an operation as the given user.  This method sets the login user
+   * to the given UGI and does not restore the previous login user when it
+   * returns.
+   *
+   * @param ugi the target user
+   * @param op the operation to perform
+   * @return true if the operation succeeded without throwing an exception
+   */
+  private boolean doUserOp(UserGroupInformation ugi, final UserOp op) {
+    UserGroupInformation.setLoginUser(ugi);
+
+    // Execute the operation as the given user.
+    return ugi.doAs(new PrivilegedAction<Boolean>() {
+      @Override
+      public Boolean run() {
+        try {
+          op.execute();
+
+          return true;
+        } catch (IOException ex) {
+          LOG.error("IOException thrown during doAs() operation", ex);
+
+          return false;
+        }
+      }
+    });
+  }
+
+  /**
+   * Simple interface that defines an operation to perform.
+   */
+  private interface UserOp {
+    void execute() throws IOException;
+  }
+}

+ 29 - 91
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java

@@ -22,11 +22,8 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
@@ -35,12 +32,15 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A JUnit test for corrupted file handling.
@@ -70,6 +70,8 @@ import org.mockito.Mockito;
  *     replica was created from the non-corrupted replica.
  */
 public class TestCrcCorruption {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestCrcCorruption.class);
 
   private DFSClientFaultInjector faultInjector;
 
@@ -167,90 +169,26 @@ public class TestCrcCorruption {
       // file disallows this Datanode to send data to another datanode.
       // However, a client is alowed access to this block.
       //
-      File storageDir = cluster.getInstanceStorageDir(0, 1);
-      String bpid = cluster.getNamesystem().getBlockPoolId();
-      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-      assertTrue("data directory does not exist", data_dir.exists());
-      File[] blocks = data_dir.listFiles();
-      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
-      int num = 0;
-      for (int idx = 0; idx < blocks.length; idx++) {
-        if (blocks[idx].getName().startsWith(Block.BLOCK_FILE_PREFIX) &&
-            blocks[idx].getName().endsWith(".meta")) {
-          num++;
-          if (num % 3 == 0) {
-            //
-            // remove .meta file
-            //
-            System.out.println("Deliberately removing file " + blocks[idx].getName());
-            assertTrue("Cannot remove file.", blocks[idx].delete());
-          } else if (num % 3 == 1) {
-            //
-            // shorten .meta file
-            //
-            RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
-            FileChannel channel = file.getChannel();
-            int newsize = random.nextInt((int)channel.size()/2);
-            System.out.println("Deliberately truncating file " + 
-                               blocks[idx].getName() + 
-                               " to size " + newsize + " bytes.");
-            channel.truncate(newsize);
-            file.close();
-          } else {
-            //
-            // corrupt a few bytes of the metafile
-            //
-            RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
-            FileChannel channel = file.getChannel();
-            long position = 0;
-            //
-            // The very first time, corrupt the meta header at offset 0
-            //
-            if (num != 2) {
-              position = (long)random.nextInt((int)channel.size());
-            }
-            int length = random.nextInt((int)(channel.size() - position + 1));
-            byte[] buffer = new byte[length];
-            random.nextBytes(buffer);
-            channel.write(ByteBuffer.wrap(buffer), position);
-            System.out.println("Deliberately corrupting file " + 
-                               blocks[idx].getName() + 
-                               " at offset " + position +
-                               " length " + length);
-            file.close();
-          }
-        }
-      }
-      
-      //
-      // Now deliberately corrupt all meta blocks from the second
-      // directory of the first datanode
-      //
-      storageDir = cluster.getInstanceStorageDir(0, 1);
-      data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-      assertTrue("data directory does not exist", data_dir.exists());
-      blocks = data_dir.listFiles();
-      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
-
-      int count = 0;
-      File previous = null;
-      for (int idx = 0; idx < blocks.length; idx++) {
-        if (blocks[idx].getName().startsWith("blk_") &&
-            blocks[idx].getName().endsWith(".meta")) {
-          //
-          // Move the previous metafile into the current one.
-          //
-          count++;
-          if (count % 2 == 0) {
-            System.out.println("Deliberately insertimg bad crc into files " +
-                                blocks[idx].getName() + " " + previous.getName());
-            assertTrue("Cannot remove file.", blocks[idx].delete());
-            assertTrue("Cannot corrupt meta file.", previous.renameTo(blocks[idx]));
-            assertTrue("Cannot recreate empty meta file.", previous.createNewFile());
-            previous = null;
-          } else {
-            previous = blocks[idx];
-          }
+      final int dnIdx = 0;
+      final DataNode dn = cluster.getDataNodes().get(dnIdx);
+      final String bpid = cluster.getNamesystem().getBlockPoolId();
+      List<FinalizedReplica> replicas =
+          dn.getFSDataset().getFinalizedBlocks(bpid);
+      assertTrue("Replicas do not exist", !replicas.isEmpty());
+
+      for (int idx = 0; idx < replicas.size(); idx++) {
+        FinalizedReplica replica = replicas.get(idx);
+        ExtendedBlock eb = new ExtendedBlock(bpid, replica);
+        if (idx % 3 == 0) {
+          LOG.info("Deliberately removing meta for block " + eb);
+          cluster.deleteMeta(dnIdx, eb);
+        } else if (idx % 3 == 1) {
+          final int newSize = 2;  // bytes
+          LOG.info("Deliberately truncating meta file for block " +
+              eb + " to size " +  newSize + " bytes.");
+          cluster.truncateMeta(dnIdx, eb, newSize);
+        } else {
+          cluster.corruptMeta(dnIdx, eb);
         }
       }
 
@@ -260,7 +198,7 @@ public class TestCrcCorruption {
       //
       assertTrue("Corrupted replicas not handled properly.",
                  util.checkFiles(fs, "/srcdat"));
-      System.out.println("All File still have a valid replica");
+      LOG.info("All File still have a valid replica");
 
       //
       // set replication factor back to 1. This causes only one replica of
@@ -273,7 +211,7 @@ public class TestCrcCorruption {
       //System.out.println("All Files done with removing replicas");
       //assertTrue("Excess replicas deleted. Corrupted replicas found.",
       //           util.checkFiles(fs, "/srcdat"));
-      System.out.println("The excess-corrupted-replica test is disabled " +
+      LOG.info("The excess-corrupted-replica test is disabled " +
                          " pending HADOOP-1557");
 
       util.cleanup(fs, "/srcdat");

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java

@@ -57,6 +57,8 @@ public class TestDFSStripedOutputStream {
     int numDNs = dataBlocks + parityBlocks + 2;
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+        false);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);

+ 15 - 18
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java

@@ -131,15 +131,16 @@ public class TestDFSStripedOutputStreamWithFailure {
 
   private static final List<Integer> LENGTHS = newLengths();
 
-  static int getLength(int i) {
-    return LENGTHS.get(i);
+  static Integer getLength(int i) {
+    return i >= 0 && i < LENGTHS.size()? LENGTHS.get(i): null;
   }
 
+  private static final Random RANDOM = new Random();
+
   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
   private final Path dir = new Path("/"
       + TestDFSStripedOutputStreamWithFailure.class.getSimpleName());
-  private final Random random = new Random();
 
   private void setup(Configuration conf) throws IOException {
     final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
@@ -166,19 +167,6 @@ public class TestDFSStripedOutputStreamWithFailure {
     return conf;
   }
 
-  @Test(timeout=240000)
-  public void testDatanodeFailure56() throws Exception {
-    runTest(getLength(56));
-  }
-
-  @Test(timeout=240000)
-  public void testDatanodeFailureRandomLength() throws Exception {
-    int lenIndex = random.nextInt(LENGTHS.size());
-    LOG.info("run testMultipleDatanodeFailureRandomLength with length index: "
-        + lenIndex);
-    runTest(getLength(lenIndex));
-  }
-
   @Test(timeout=240000)
   public void testMultipleDatanodeFailure56() throws Exception {
     runTestWithMultipleFailure(getLength(56));
@@ -190,7 +178,7 @@ public class TestDFSStripedOutputStreamWithFailure {
    */
   //@Test(timeout=240000)
   public void testMultipleDatanodeFailureRandomLength() throws Exception {
-    int lenIndex = random.nextInt(LENGTHS.size());
+    int lenIndex = RANDOM.nextInt(LENGTHS.size());
     LOG.info("run testMultipleDatanodeFailureRandomLength with length index: "
         + lenIndex);
     runTestWithMultipleFailure(getLength(lenIndex));
@@ -484,7 +472,16 @@ public class TestDFSStripedOutputStreamWithFailure {
         = new TestDFSStripedOutputStreamWithFailure();
     private void run(int offset) {
       final int i = offset + getBase();
-      final int length = getLength(i);
+      final Integer length = getLength(i);
+      if (length == null) {
+        System.out.println("Skip test " + i + " since length=null.");
+        return;
+      }
+      if (RANDOM.nextInt(16) != 0) {
+        System.out.println("Test " + i + ", length=" + length
+            + ", is not chosen to run.");
+        return;
+      }
       System.out.println("Run test " + i + ", length=" + length);
       test.runTest(length);
     }

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure020.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure020 extends TestBase {}
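
The numbered files that follow (030 through 210) all use this exact template: an empty class whose only purpose is to give JUnit another TestBase instance covering a different slice of length indices, per the getBase() offsetting discussed above. A hypothetical further slice would look like the snippet below; the 220 suffix is invented for illustration and is not part of this diff.

package org.apache.hadoop.hdfs;

import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;

// Hypothetical additional slice; the empty body is intentional, since every
// test method is inherited from TestBase.
public class TestDFSStripedOutputStreamWithFailure220 extends TestBase {}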

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure030.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure030 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure040.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure040 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure050.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure050 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure060.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure060 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure070.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure070 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure080.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure080 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure090.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure090 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure100.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure100 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure110.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure110 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure120.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure120 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure130.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure130 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure140.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure140 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure150.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure150 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure160.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure160 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure170.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure170 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure180.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure180 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure190.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure190 extends TestBase {}

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure200.java

@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure200 extends TestBase {}

+ 23 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure210.java

@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure210 extends TestBase {
+}

+ 28 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -1048,4 +1048,32 @@ public class TestDistributedFileSystem {
       cluster.shutdown();
     }
   }
+
+  @Test(timeout = 30000)
+  public void testTotalDfsUsed() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      FileSystem fs = cluster.getFileSystem();
+      // create file under root
+      FSDataOutputStream File1 = fs.create(new Path("/File1"));
+      File1.write("hi".getBytes());
+      File1.close();
+      // create file under sub-folder
+      FSDataOutputStream File2 = fs.create(new Path("/Folder1/File2"));
+      File2.write("hi".getBytes());
+      File2.close();
+      // getUsed(Path) should return total len of all the files from a path
+      assertEquals(2, fs.getUsed(new Path("/Folder1")));
+      //getUsed() should return total length of all files in filesystem
+      assertEquals(4, fs.getUsed());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+        cluster = null;
+      }
+    }
+  }
+
 }
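
The new test exercises the path-scoped overload getUsed(Path) alongside the existing getUsed(): the former returns the total length of all files under the given path, the latter the total across the whole file system (2 and 4 bytes respectively for the two 2-byte files written above). Below is a minimal usage sketch against any configured FileSystem; the path and file contents are illustrative assumptions, not taken from the diff.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetUsedSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Write a small file; its length contributes to both accounting calls.
    try (FSDataOutputStream out = fs.create(new Path("/demo/file"))) {
      out.write("hi".getBytes());                         // 2 bytes
    }
    System.out.println(fs.getUsed(new Path("/demo")));    // bytes under /demo
    System.out.println(fs.getUsed());                     // bytes in the whole FS
  }
}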

Not all files are shown because too many files changed in this diff