
Merge trunk into branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/MR-2841@1612742 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 11 years ago
parent
commit
341695e731
100 changed files with 3609 additions and 787 deletions (each entry below lists additions, deletions, then the file path)
  1. 11 0
      hadoop-common-project/hadoop-auth/pom.xml
  2. 74 7
      hadoop-common-project/hadoop-common/CHANGES.txt
  3. 1 0
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
  4. 174 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/CachingKeyProvider.java
  5. 166 69
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
  6. 10 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderFactory.java
  7. 26 15
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
  8. 160 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
  9. 8 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java
  10. 317 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
  11. 33 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  12. 7 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java
  13. 5 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
  14. 34 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
  15. 42 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  16. 89 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  17. 13 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
  18. 4 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
  19. 55 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodec.java
  20. 10 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
  21. 16 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
  22. 4 10
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
  23. 8 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
  24. 4 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java
  25. 4 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
  26. 20 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
  27. 4 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  28. 26 15
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
  29. 6 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
  30. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
  31. 11 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
  32. 6 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
  33. 4 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
  34. 46 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
  35. 4 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
  36. 2 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
  37. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
  38. 3 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
  39. 46 19
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java
  40. 15 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ImpersonationProvider.java
  41. 25 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java
  42. 51 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Timer.java
  43. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Tool.java
  44. 47 0
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  45. 3 1
      hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm
  46. 2 0
      hadoop-common-project/hadoop-common/src/site/apt/NativeLibraries.apt.vm
  47. 53 21
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestCachingKeyProvider.java
  48. 93 31
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java
  49. 8 8
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java
  50. 190 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
  51. 19 9
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java
  52. 54 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
  53. 52 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
  54. 22 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
  55. 4 4
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
  56. 2 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
  57. 6 6
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
  58. 34 16
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
  59. 4 4
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java
  60. 14 9
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java
  61. 56 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
  62. 18 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java
  63. 100 26
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java
  64. 52 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java
  65. 15 0
      hadoop-common-project/hadoop-kms/src/main/conf/kms-acls.xml
  66. 149 0
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/EagerKeyGeneratorKeyProviderCryptoExtension.java
  67. 96 1
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
  68. 12 24
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
  69. 0 177
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSCacheKeyProvider.java
  70. 13 1
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
  71. 20 1
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSServerJSONUtils.java
  72. 42 8
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
  73. 103 7
      hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
  74. 100 11
      hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
  75. 19 6
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
  76. 12 0
      hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
  77. 2 0
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
  78. 13 7
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
  79. 120 0
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestClientAccessPrivilege.java
  80. 7 0
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java
  81. 4 4
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java
  82. 4 2
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
  83. 58 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  84. 169 0
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBootstrapStandbyWithBKJM.java
  85. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  86. 31 19
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  87. 21 14
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
  88. 28 29
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
  89. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
  90. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
  91. 18 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
  92. 9 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
  93. 10 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
  94. 1 28
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
  95. 7 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
  96. 49 18
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  97. 4 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  98. 21 17
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  99. 34 31
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
  100. 32 27
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

+ 11 - 0
hadoop-common-project/hadoop-auth/pom.xml

@@ -139,6 +139,17 @@
          <attach>true</attach>
        </configuration>
      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
    </plugins>
  </build>
 

+ 74 - 7
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -36,10 +36,6 @@ Trunk (Unreleased)
 
    HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
 
-    HADOOP-7664. Remove warmings when overriding final parameter configuration
-    if the override value is same as the final parameter value.
-    (Ravi Prakash via suresh)
-
    HADOOP-8078. Add capability to turn on security in unit tests. (Jaimin 
    Jetly via jitendra)
 
@@ -162,9 +158,6 @@ Trunk (Unreleased)
 
    HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)
 
-    HADOOP-10607. Create API to separate credential/password storage from
-    applications. (Larry McCay via omalley)
-
    HADOOP-10696. Add optional attributes to KeyProvider Options and Metadata. 
    (tucu)
 
@@ -182,6 +175,20 @@ Trunk (Unreleased)
 
    HADOOP-10736. Add key attributes to the key shell. (Mike Yoder via wang)
 
+    HADOOP-10824. Refactor KMSACLs to avoid locking. (Benoy Antony via umamahesh)
+
+    HADOOP-10841. EncryptedKeyVersion should have a key name property. 
+    (asuresh via tucu)
+
+    HADOOP-10842. CryptoExtension generateEncryptedKey method should 
+    receive the key name. (asuresh via tucu)
+
+    HADOOP-10750. KMSKeyProviderCache should be in hadoop-common. 
+    (asuresh via tucu)
+
+    HADOOP-10720. KMS: Implement generateEncryptedKey and decryptEncryptedKey
+    in the REST API. (asuresh via tucu)
+
  BUG FIXES
 
    HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -383,6 +390,18 @@ Trunk (Unreleased)
 
    HADOOP-10834. Typo in CredentialShell usage. (Benoy Antony via umamahesh)
 
+    HADOOP-10816. KeyShell returns -1 on error to the shell, should be 1.
+    (Mike Yoder via wang)
+
+    HADOOP-10840. Fix OutOfMemoryError caused by metrics system in Azure File
+    System. (Shanyu Zhao via cnauroth)
+
+    HADOOP-10826. Iteration on KeyProviderFactory.serviceLoader is 
+    thread-unsafe. (benoyantony viat tucu)
+
+    HADOOP-10881. Clarify usage of encryption and encrypted encryption
+    key in KeyProviderCryptoExtension. (wang)
+
  OPTIMIZATIONS
 
    HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -401,6 +420,38 @@ Release 2.6.0 - UNRELEASED
 
    HADOOP-10815. Implement Windows equivalent of mlock. (cnauroth)
 
+    HADOOP-7664. Remove warmings when overriding final parameter configuration
+    if the override value is same as the final parameter value.
+    (Ravi Prakash via suresh)
+
+    HADOOP-10673. Update rpc metrics when the call throws an exception. (Ming Ma
+    via jing9)
+
+    HADOOP-10845. Add common tests for ACLs in combination with viewfs.
+    (Stephen Chu via cnauroth)
+
+    HADOOP-10839. Add unregisterSource() to MetricsSystem API.
+    (Shanyu Zhao via cnauroth)
+
+    HADOOP-10607. Create an API to separate credentials/password storage
+    from applications (Larry McCay via omalley)
+
+    HADOOP-10732. Fix locking in credential update. (Ted Yu via omalley)
+
+    HADOOP-10733. Fix potential null dereference in CredShell. (Ted Yu via
+    omalley)
+
+    HADOOP-10610. Upgrade S3n s3.fs.buffer.dir to support multi directories.
+    (Ted Malaska via atm)
+
+    HADOOP-10817. ProxyUsers configuration should support configurable 
+    prefixes. (tucu)
+
+    HADOOP-10755. Support negative caching of user-group mapping.
+    (Lei Xu via wang)
+
+    HADOOP-10855. Allow Text to be read with a known Length. (todd)
+
  OPTIMIZATIONS
 
  BUG FIXES
@@ -416,6 +467,19 @@ Release 2.6.0 - UNRELEASED
 
    HADOOP-10810. Clean up native code compilation warnings. (cnauroth)
 
+    HADOOP-9921. daemon scripts should remove pid file on stop call after stop
+    or process is found not running ( vinayakumarb )
+
+    HADOOP-10591.  Compression codecs must used pooled direct buffers or
+    deallocate direct buffers when stream is closed (cmccabe)
+
+    HADOOP-10857.  Native Libraries Guide doen't mention a dependency on
+    openssl-development package (ozawa via cmccabe)
+
+    HADOOP-10866. RawLocalFileSystem fails to read symlink targets via the stat
+    command when the format of the stat command uses non-curly quotes (yzhang
+    via cmccabe)
+
Release 2.5.0 - UNRELEASED
 
  INCOMPATIBLE CHANGES
@@ -738,6 +802,9 @@ Release 2.5.0 - UNRELEASED
    HADOOP-10710. hadoop.auth cookie is not properly constructed according to 
    RFC2109. (Juan Yu via tucu)
 
+    HADOOP-10864. Tool documentenation is broken. (Akira Ajisaka
+    via Arpit Agarwal)
+
Release 2.4.1 - 2014-06-23 
 
  INCOMPATIBLE CHANGES

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh

@@ -198,6 +198,7 @@ case $startStop in
      else
        echo no $command to stop
      fi
+      rm -f $pid
    else
      echo no $command to stop
    fi

+ 174 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/CachingKeyProvider.java

@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key;
+
+import java.io.IOException;
+import java.security.NoSuchAlgorithmException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+
+/**
+ * A <code>KeyProviderExtension</code> implementation providing a short lived
+ * cache for <code>KeyVersions</code> and <code>Metadata</code>to avoid burst
+ * of requests to hit the underlying <code>KeyProvider</code>.
+ */
+public class CachingKeyProvider extends
+    KeyProviderExtension<CachingKeyProvider.CacheExtension> {
+
+  static class CacheExtension implements KeyProviderExtension.Extension {
+    private final KeyProvider provider;
+    private LoadingCache<String, KeyVersion> keyVersionCache;
+    private LoadingCache<String, KeyVersion> currentKeyCache;
+    private LoadingCache<String, Metadata> keyMetadataCache;
+
+    CacheExtension(KeyProvider prov, long keyTimeoutMillis,
+        long currKeyTimeoutMillis) {
+      this.provider = prov;
+      keyVersionCache =
+          CacheBuilder.newBuilder().expireAfterAccess(keyTimeoutMillis,
+              TimeUnit.MILLISECONDS)
+              .build(new CacheLoader<String, KeyVersion>() {
+                @Override
+                public KeyVersion load(String key) throws Exception {
+                  KeyVersion kv = provider.getKeyVersion(key);
+                  if (kv == null) {
+                    throw new KeyNotFoundException();
+                  }
+                  return kv;
+                }
+              });
+      keyMetadataCache =
+          CacheBuilder.newBuilder().expireAfterAccess(keyTimeoutMillis,
+              TimeUnit.MILLISECONDS)
+              .build(new CacheLoader<String, Metadata>() {
+                @Override
+                public Metadata load(String key) throws Exception {
+                  Metadata meta = provider.getMetadata(key);
+                  if (meta == null) {
+                    throw new KeyNotFoundException();
+                  }
+                  return meta;
+                }
+              });
+      currentKeyCache =
+          CacheBuilder.newBuilder().expireAfterWrite(currKeyTimeoutMillis,
+          TimeUnit.MILLISECONDS)
+          .build(new CacheLoader<String, KeyVersion>() {
+            @Override
+            public KeyVersion load(String key) throws Exception {
+              KeyVersion kv = provider.getCurrentKey(key);
+              if (kv == null) {
+                throw new KeyNotFoundException();
+              }
+              return kv;
+            }
+          });
+    }
+  }
+
+  @SuppressWarnings("serial")
+  private static class KeyNotFoundException extends Exception { }
+
+  public CachingKeyProvider(KeyProvider keyProvider, long keyTimeoutMillis,
+      long currKeyTimeoutMillis) {
+    super(keyProvider, new CacheExtension(keyProvider, keyTimeoutMillis,
+        currKeyTimeoutMillis));
+  }
+
+  @Override
+  public KeyVersion getCurrentKey(String name) throws IOException {
+    try {
+      return getExtension().currentKeyCache.get(name);
+    } catch (ExecutionException ex) {
+      Throwable cause = ex.getCause();
+      if (cause instanceof KeyNotFoundException) {
+        return null;
+      } else if (cause instanceof IOException) {
+        throw (IOException) cause;
+      } else {
+        throw new IOException(cause);
+      }
+    }
+  }
+
+  @Override
+  public KeyVersion getKeyVersion(String versionName)
+      throws IOException {
+    try {
+      return getExtension().keyVersionCache.get(versionName);
+    } catch (ExecutionException ex) {
+      Throwable cause = ex.getCause();
+      if (cause instanceof KeyNotFoundException) {
+        return null;
+      } else if (cause instanceof IOException) {
+        throw (IOException) cause;
+      } else {
+        throw new IOException(cause);
+      }
+    }
+  }
+
+  @Override
+  public void deleteKey(String name) throws IOException {
+    getKeyProvider().deleteKey(name);
+    getExtension().currentKeyCache.invalidate(name);
+    getExtension().keyMetadataCache.invalidate(name);
+    // invalidating all key versions as we don't know
+    // which ones belonged to the deleted key
+    getExtension().keyVersionCache.invalidateAll();
+  }
+
+  @Override
+  public KeyVersion rollNewVersion(String name, byte[] material)
+      throws IOException {
+    KeyVersion key = getKeyProvider().rollNewVersion(name, material);
+    getExtension().currentKeyCache.invalidate(name);
+    getExtension().keyMetadataCache.invalidate(name);
+    return key;
+  }
+
+  @Override
+  public KeyVersion rollNewVersion(String name)
+      throws NoSuchAlgorithmException, IOException {
+    KeyVersion key = getKeyProvider().rollNewVersion(name);
+    getExtension().currentKeyCache.invalidate(name);
+    getExtension().keyMetadataCache.invalidate(name);
+    return key;
+  }
+
+  @Override
+  public Metadata getMetadata(String name) throws IOException {
+    try {
+      return getExtension().keyMetadataCache.get(name);
+    } catch (ExecutionException ex) {
+      Throwable cause = ex.getCause();
+      if (cause instanceof KeyNotFoundException) {
+        return null;
+      } else if (cause instanceof IOException) {
+        throw (IOException) cause;
+      } else {
+        throw new IOException(cause);
+      }
+    }
+  }
+
+}
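The new CachingKeyProvider above simply layers short-lived Guava LoadingCaches over an existing provider. A minimal usage sketch (not part of this commit; the key name, the timeout values, and the use of KeyProviderFactory.getProviders() are illustrative assumptions):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.CachingKeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class CachingKeyProviderSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Resolve whatever providers are configured; assumes at least one exists.
    List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
    KeyProvider underlying = providers.get(0);

    // Cache key versions/metadata for 10 minutes and the current key for 30
    // seconds; these timeouts are examples, not defaults from this patch.
    KeyProvider cached =
        new CachingKeyProvider(underlying, 10 * 60 * 1000, 30 * 1000);

    // Repeated lookups within the timeout are served from the cache instead
    // of hitting the underlying KeyProvider again.
    KeyProvider.KeyVersion current = cached.getCurrentKey("mykey");
    KeyProvider.Metadata meta = cached.getMetadata("mykey");
    System.out.println(current + " / " + meta);
  }
}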

+ 166 - 69
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java

@@ -21,52 +21,117 @@ package org.apache.hadoop.crypto.key;
 import java.io.IOException;
 import java.security.GeneralSecurityException;
 import java.security.SecureRandom;
-
 import javax.crypto.Cipher;
 import javax.crypto.spec.IvParameterSpec;
 import javax.crypto.spec.SecretKeySpec;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
 
 /**
- * A KeyProvider with Cytographic Extensions specifically for generating
- * Encrypted Keys as well as decrypting them
+ * A KeyProvider with Cryptographic Extensions specifically for generating
+ * and decrypting encrypted encryption keys.
  * 
  */
+@InterfaceAudience.Private
 public class KeyProviderCryptoExtension extends
     KeyProviderExtension<KeyProviderCryptoExtension.CryptoExtension> {
 
-  protected static final String EEK = "EEK";
-  protected static final String EK = "EK";
+  /**
+   * Designates an encrypted encryption key, or EEK.
+   */
+  public static final String EEK = "EEK";
+  /**
+   * Designates a decrypted encrypted encryption key, that is, an encryption key
+   * (EK).
+   */
+  public static final String EK = "EK";
 
   /**
-   * This is a holder class whose instance contains the keyVersionName, iv
-   * used to generate the encrypted Key and the encrypted KeyVersion
+   * An encrypted encryption key (EEK) and related information. An EEK must be
+   * decrypted using the key's encryption key before it can be used.
    */
   public static class EncryptedKeyVersion {
-    private String keyVersionName;
-    private byte[] iv;
-    private KeyVersion encryptedKey;
-
-    protected EncryptedKeyVersion(String keyVersionName, byte[] iv,
-        KeyVersion encryptedKey) {
-      this.keyVersionName = keyVersionName;
-      this.iv = iv;
-      this.encryptedKey = encryptedKey;
+    private String encryptionKeyName;
+    private String encryptionKeyVersionName;
+    private byte[] encryptedKeyIv;
+    private KeyVersion encryptedKeyVersion;
+
+    /**
+     * Create a new EncryptedKeyVersion.
+     *
+     * @param keyName                  Name of the encryption key used to
+     *                                 encrypt the encrypted key.
+     * @param encryptionKeyVersionName Version name of the encryption key used
+     *                                 to encrypt the encrypted key.
+     * @param encryptedKeyIv           Initialization vector of the encrypted
+     *                                 key. The IV of the encryption key used to
+     *                                 encrypt the encrypted key is derived from
+     *                                 this IV.
+     * @param encryptedKeyVersion      The encrypted encryption key version.
+     */
+    protected EncryptedKeyVersion(String keyName,
+        String encryptionKeyVersionName, byte[] encryptedKeyIv,
+        KeyVersion encryptedKeyVersion) {
+      this.encryptionKeyName = keyName;
+      this.encryptionKeyVersionName = encryptionKeyVersionName;
+      this.encryptedKeyIv = encryptedKeyIv;
+      this.encryptedKeyVersion = encryptedKeyVersion;
+    }
+
+    /**
+     * @return Name of the encryption key used to encrypt the encrypted key.
+     */
+    public String getEncryptionKeyName() {
+      return encryptionKeyName;
    }
 
-    public String getKeyVersionName() {
-      return keyVersionName;
+    /**
+     * @return Version name of the encryption key used to encrypt the encrypted
+     * key.
+     */
+    public String getEncryptionKeyVersionName() {
+      return encryptionKeyVersionName;
    }
 
-    public byte[] getIv() {
-      return iv;
+    /**
+     * @return Initialization vector of the encrypted key. The IV of the
+     * encryption key used to encrypt the encrypted key is derived from this
+     * IV.
+     */
+    public byte[] getEncryptedKeyIv() {
+      return encryptedKeyIv;
    }
 
-    public KeyVersion getEncryptedKey() {
-      return encryptedKey;
+    /**
+     * @return The encrypted encryption key version.
+     */
+    public KeyVersion getEncryptedKeyVersion() {
+      return encryptedKeyVersion;
    }
 
+    /**
+     * Derive the initialization vector (IV) for the encryption key from the IV
+     * of the encrypted key. This derived IV is used with the encryption key to
+     * decrypt the encrypted key.
+     * <p/>
+     * The alternative to this is using the same IV for both the encryption key
+     * and the encrypted key. Even a simple symmetric transformation like this
+     * improves security by avoiding IV re-use. IVs will also be fairly unique
+     * among different EEKs.
+     *
+     * @param encryptedKeyIV of the encrypted key (i.e. {@link
+     * #getEncryptedKeyIv()})
+     * @return IV for the encryption key
+     */
+    protected static byte[] deriveIV(byte[] encryptedKeyIV) {
+      byte[] rIv = new byte[encryptedKeyIV.length];
+      // Do a simple XOR transformation to flip all the bits
+      for (int i = 0; i < encryptedKeyIV.length; i++) {
+        rIv[i] = (byte) (encryptedKeyIV[i] ^ 0xff);
+      }
+      return rIv;
+    }
  }
 
  /**
@@ -75,17 +140,24 @@ public class KeyProviderCryptoExtension extends
   */
  public interface CryptoExtension extends KeyProviderExtension.Extension {
 
+    /**
+     * Calls to this method allows the underlying KeyProvider to warm-up any
+     * implementation specific caches used to store the Encrypted Keys.
+     * @param keyNames Array of Key Names
+     */
+    public void warmUpEncryptedKeys(String... keyNames)
+        throws IOException;
+
    /**
     * Generates a key material and encrypts it using the given key version name
     * and initialization vector. The generated key material is of the same
-     * length as the <code>KeyVersion</code> material and is encrypted using the
-     * same cipher.
+     * length as the <code>KeyVersion</code> material of the latest key version
+     * of the key and is encrypted using the same cipher.
     * <p/>
     * NOTE: The generated key is not stored by the <code>KeyProvider</code>
     * 
-     * @param encryptionKeyVersion
-     *          a KeyVersion object containing the keyVersion name and material
-     *          to encrypt.
+     * @param encryptionKeyName
+     *          The latest KeyVersion of this key's material will be encrypted.
     * @return EncryptedKeyVersion with the generated key material, the version
     *         name is 'EEK' (for Encrypted Encryption Key)
     * @throws IOException
@@ -95,7 +167,7 @@ public class KeyProviderCryptoExtension extends
     *           cryptographic issue.
     */
    public EncryptedKeyVersion generateEncryptedKey(
-        KeyVersion encryptionKeyVersion) throws IOException,
+        String encryptionKeyName) throws IOException,
        GeneralSecurityException;
 
    /**
@@ -126,62 +198,87 @@ public class KeyProviderCryptoExtension extends
      this.keyProvider = keyProvider;
    }
 
-    // the IV used to encrypt a EK typically will be the same IV used to
-    // encrypt data with the EK. To avoid any chance of weakening the 
-    // encryption because the same IV is used, we simply XOR the IV thus we 
-    // are not using the same IV for 2 different encryptions (even if they 
-    // are done using different keys)
-    private byte[] flipIV(byte[] iv) {
-      byte[] rIv = new byte[iv.length];
-      for (int i = 0; i < iv.length; i++) {
-        rIv[i] = (byte) (iv[i] ^ 0xff);
-      }
-      return rIv;
-    }
-
    @Override
-    public EncryptedKeyVersion generateEncryptedKey(KeyVersion keyVersion)
+    public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
        throws IOException, GeneralSecurityException {
-      KeyVersion keyVer =
-          keyProvider.getKeyVersion(keyVersion.getVersionName());
-      Preconditions.checkNotNull(keyVer, "KeyVersion name '%s' does not exist",
-          keyVersion.getVersionName());
-      byte[] newKey = new byte[keyVer.getMaterial().length];
-      SecureRandom.getInstance("SHA1PRNG").nextBytes(newKey);
+      // Fetch the encryption key
+      KeyVersion encryptionKey = keyProvider.getCurrentKey(encryptionKeyName);
+      Preconditions.checkNotNull(encryptionKey,
+          "No KeyVersion exists for key '%s' ", encryptionKeyName);
+      // Generate random bytes for new key and IV
      Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
-      byte[] iv = SecureRandom.getSeed(cipher.getBlockSize());
-      cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(keyVer.getMaterial(),
-          "AES"), new IvParameterSpec(flipIV(iv)));
-      byte[] ek = cipher.doFinal(newKey);
-      return new EncryptedKeyVersion(keyVersion.getVersionName(), iv,
-          new KeyVersion(keyVer.getName(), EEK, ek));
+      SecureRandom random = SecureRandom.getInstance("SHA1PRNG");
+      final byte[] newKey = new byte[encryptionKey.getMaterial().length];
+      random.nextBytes(newKey);
+      final byte[] iv = random.generateSeed(cipher.getBlockSize());
+      // Encryption key IV is derived from new key's IV
+      final byte[] encryptionIV = EncryptedKeyVersion.deriveIV(iv);
+      // Encrypt the new key
+      cipher.init(Cipher.ENCRYPT_MODE,
+          new SecretKeySpec(encryptionKey.getMaterial(), "AES"),
+          new IvParameterSpec(encryptionIV));
+      final byte[] encryptedKey = cipher.doFinal(newKey);
+      return new EncryptedKeyVersion(encryptionKeyName,
+          encryptionKey.getVersionName(), iv,
+          new KeyVersion(encryptionKey.getName(), EEK, encryptedKey));
    }
 
    @Override
    public KeyVersion decryptEncryptedKey(
        EncryptedKeyVersion encryptedKeyVersion) throws IOException,
        GeneralSecurityException {
-      KeyVersion keyVer =
-          keyProvider.getKeyVersion(encryptedKeyVersion.getKeyVersionName());
-      Preconditions.checkNotNull(keyVer, "KeyVersion name '%s' does not exist",
-          encryptedKeyVersion.getKeyVersionName());
-      KeyVersion keyVersion = encryptedKeyVersion.getEncryptedKey();
+      // Fetch the encryption key material
+      final String encryptionKeyVersionName =
+          encryptedKeyVersion.getEncryptionKeyVersionName();
+      final KeyVersion encryptionKey =
+          keyProvider.getKeyVersion(encryptionKeyVersionName);
+      Preconditions.checkNotNull(encryptionKey,
+          "KeyVersion name '%s' does not exist", encryptionKeyVersionName);
+      final byte[] encryptionKeyMaterial = encryptionKey.getMaterial();
+      // Encryption key IV is determined from encrypted key's IV
+      final byte[] encryptionIV =
+          EncryptedKeyVersion.deriveIV(encryptedKeyVersion.getEncryptedKeyIv());
+      // Init the cipher with encryption key parameters
      Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
      cipher.init(Cipher.DECRYPT_MODE,
-          new SecretKeySpec(keyVersion.getMaterial(), "AES"),
-          new IvParameterSpec(flipIV(encryptedKeyVersion.getIv())));
-      byte[] ek =
-          cipher.doFinal(encryptedKeyVersion.getEncryptedKey().getMaterial());
-      return new KeyVersion(keyVer.getName(), EK, ek);
+          new SecretKeySpec(encryptionKeyMaterial, "AES"),
+          new IvParameterSpec(encryptionIV));
+      // Decrypt the encrypted key
+      final KeyVersion encryptedKV =
+          encryptedKeyVersion.getEncryptedKeyVersion();
+      final byte[] decryptedKey = cipher.doFinal(encryptedKV.getMaterial());
+      return new KeyVersion(encryptionKey.getName(), EK, decryptedKey);
+    }
+
+    @Override
+    public void warmUpEncryptedKeys(String... keyNames)
+        throws IOException {
+      // NO-OP since the default version does not cache any keys
    }
 
  }
 
-  private KeyProviderCryptoExtension(KeyProvider keyProvider,
+  /**
+   * This constructor is to be used by sub classes that provide
+   * delegating/proxying functionality to the {@link KeyProviderCryptoExtension}
+   * @param keyProvider
+   * @param extension
+   */
+  protected KeyProviderCryptoExtension(KeyProvider keyProvider,
      CryptoExtension extension) {
    super(keyProvider, extension);
  }
 
+  /**
+   * Notifies the Underlying CryptoExtension implementation to warm up any
+   * implementation specific caches for the specified KeyVersions
+   * @param keyNames Arrays of key Names
+   */
+  public void warmUpEncryptedKeys(String... keyNames)
+      throws IOException {
+    getExtension().warmUpEncryptedKeys(keyNames);
+  }
+
  /**
   * Generates a key material and encrypts it using the given key version name
   * and initialization vector. The generated key material is of the same
@@ -190,18 +287,18 @@ public class KeyProviderCryptoExtension extends
   * <p/>
   * NOTE: The generated key is not stored by the <code>KeyProvider</code>
   *
-   * @param encryptionKey a KeyVersion object containing the keyVersion name and 
-   * material to encrypt.
+   * @param encryptionKeyName The latest KeyVersion of this key's material will
+   * be encrypted.
   * @return EncryptedKeyVersion with the generated key material, the version
   * name is 'EEK' (for Encrypted Encryption Key)
   * @throws IOException thrown if the key material could not be generated
   * @throws GeneralSecurityException thrown if the key material could not be 
   * encrypted because of a cryptographic issue.
   */
-  public EncryptedKeyVersion generateEncryptedKey(KeyVersion encryptionKey) 
+  public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
      throws IOException,
                                           GeneralSecurityException {
-    return getExtension().generateEncryptedKey(encryptionKey);
+    return getExtension().generateEncryptedKey(encryptionKeyName);
  }
 
  /**
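To make the reworked contract concrete, here is a hedged round-trip sketch (not part of this commit): generateEncryptedKey() now takes the encryption key name and resolves the latest key version itself, and decryptEncryptedKey() uses the version name recorded inside the EncryptedKeyVersion. The createKeyProviderCryptoExtension() factory and the key name "mykey" are assumptions not shown in this excerpt.

import java.io.IOException;
import java.security.GeneralSecurityException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class EekRoundTripSketch {
  public static void main(String[] args)
      throws IOException, GeneralSecurityException {
    Configuration conf = new Configuration();
    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
    KeyProviderCryptoExtension kpce =
        KeyProviderCryptoExtension.createKeyProviderCryptoExtension(provider);

    // Generate an EEK for the key named "mykey" (assumed to already exist).
    EncryptedKeyVersion eek = kpce.generateEncryptedKey("mykey");
    System.out.println("Encrypted with key version "
        + eek.getEncryptionKeyVersionName());

    // Decrypt it back into usable key material; the returned version name is EK.
    KeyProvider.KeyVersion ek = kpce.decryptEncryptedKey(eek);
    System.out.println(ek.getVersionName() + ", "
        + ek.getMaterial().length + " bytes");
  }
}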

+ 10 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderFactory.java

@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
 import java.util.ServiceLoader;
 
@@ -47,6 +48,15 @@ public abstract class KeyProviderFactory {
  private static final ServiceLoader<KeyProviderFactory> serviceLoader =
      ServiceLoader.load(KeyProviderFactory.class);
 
+  // Iterate through the serviceLoader to avoid lazy loading.
+  // Lazy loading would require synchronization in concurrent use cases.
+  static {
+    Iterator<KeyProviderFactory> iterServices = serviceLoader.iterator();
+    while (iterServices.hasNext()) {
+      iterServices.next();
+    }
+  }
+  
  public static List<KeyProvider> getProviders(Configuration conf
                                               ) throws IOException {
    List<KeyProvider> result = new ArrayList<KeyProvider>();
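The static block added here eagerly walks the ServiceLoader so the lazy, non-thread-safe provider lookup happens once during class initialization. A generic illustration of the same pattern (not Hadoop code, just an assumption-free sketch of the idiom):

import java.util.ServiceLoader;

final class EagerServiceLoaderSketch {
  // ServiceLoader lookup is lazy and its iterator is not thread-safe.
  private static final ServiceLoader<Runnable> LOADER =
      ServiceLoader.load(Runnable.class);

  // Touch every provider once under class-initialization locking so that
  // later concurrent iteration only reads already-materialized providers.
  static {
    for (Runnable provider : LOADER) {
      // no-op: iterating is enough to load and cache each provider
    }
  }

  private EagerServiceLoaderSketch() {
  }
}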

+ 26 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java

@@ -57,6 +57,16 @@ public class KeyShell extends Configured implements Tool {
 
  private boolean userSuppliedProvider = false;
 
+  /**
+   * Primary entry point for the KeyShell; called via main().
+   *
+   * @param args Command line arguments.
+   * @return 0 on success and 1 on failure.  This value is passed back to
+   * the unix shell, so we must follow shell return code conventions:
+   * the return code is an unsigned character, and 0 means success, and
+   * small positive integers mean failure.
+   * @throws Exception
+   */
  @Override
  public int run(String[] args) throws Exception {
    int exitCode = 0;
@@ -68,11 +78,11 @@ public class KeyShell extends Configured implements Tool {
      if (command.validate()) {
          command.execute();
      } else {
-        exitCode = -1;
+        exitCode = 1;
      }
    } catch (Exception e) {
      e.printStackTrace(err);
-      return -1;
+      return 1;
    }
    return exitCode;
  }
@@ -86,8 +96,8 @@ public class KeyShell extends Configured implements Tool {
   * % hadoop key list [-provider providerPath]
   * % hadoop key delete keyName [--provider providerPath] [-i]
   * </pre>
-   * @param args
-   * @return
+   * @param args Command line arguments.
+   * @return 0 on success, 1 on failure.
   * @throws IOException
   */
  private int init(String[] args) throws IOException {
@@ -105,7 +115,7 @@ public class KeyShell extends Configured implements Tool {
        command = new CreateCommand(keyName, options);
        if ("--help".equals(keyName)) {
          printKeyShellUsage();
-          return -1;
+          return 1;
        }
      } else if (args[i].equals("delete")) {
        String keyName = "--help";
@@ -116,7 +126,7 @@ public class KeyShell extends Configured implements Tool {
        command = new DeleteCommand(keyName);
        if ("--help".equals(keyName)) {
          printKeyShellUsage();
-          return -1;
+          return 1;
        }
      } else if (args[i].equals("roll")) {
        String keyName = "--help";
@@ -127,7 +137,7 @@ public class KeyShell extends Configured implements Tool {
        command = new RollCommand(keyName);
        if ("--help".equals(keyName)) {
          printKeyShellUsage();
-          return -1;
+          return 1;
        }
      } else if ("list".equals(args[i])) {
        command = new ListCommand();
@@ -145,13 +155,13 @@ public class KeyShell extends Configured implements Tool {
          out.println("\nAttributes must be in attribute=value form, " +
                  "or quoted\nlike \"attribute = value\"\n");
          printKeyShellUsage();
-          return -1;
+          return 1;
        }
        if (attributes.containsKey(attr)) {
          out.println("\nEach attribute must correspond to only one value:\n" +
                  "atttribute \"" + attr + "\" was repeated\n" );
          printKeyShellUsage();
-          return -1;
+          return 1;
        }
        attributes.put(attr, val);
      } else if ("--provider".equals(args[i]) && moreTokens) {
@@ -163,17 +173,17 @@ public class KeyShell extends Configured implements Tool {
        interactive = true;
      } else if ("--help".equals(args[i])) {
        printKeyShellUsage();
-        return -1;
+        return 1;
      } else {
        printKeyShellUsage();
        ToolRunner.printGenericCommandUsage(System.err);
-        return -1;
+        return 1;
      }
    }
 
    if (command == null) {
      printKeyShellUsage();
-      return -1;
+      return 1;
    }
 
    if (!attributes.isEmpty()) {
@@ -491,10 +501,11 @@ public class KeyShell extends Configured implements Tool {
  }
 
  /**
-   * Main program.
+   * main() entry point for the KeyShell.  While strictly speaking the
+   * return is void, it will System.exit() with a return code: 0 is for
+   * success and 1 for failure.
   *
-   * @param args
-   *          Command line arguments
+   * @param args Command line arguments.
   * @throws Exception
   */
  public static void main(String[] args) throws Exception {
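A tiny demonstration (an illustrative assumption, not part of the patch) of why -1 was a poor exit code: POSIX shells reduce the exit status modulo 256, so -1 surfaces as 255 rather than a small positive failure code.

public class ExitCodeDemo {
  public static void main(String[] args) {
    // After running this, `echo $?` in a POSIX shell prints 255, not -1.
    System.exit(-1);
  }
}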

+ 160 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java

@@ -21,7 +21,9 @@ import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
@@ -33,6 +35,7 @@ import org.apache.http.client.utils.URIBuilder;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import javax.net.ssl.HttpsURLConnection;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -40,6 +43,7 @@ import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.lang.reflect.Constructor;
 import java.net.HttpURLConnection;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
@@ -50,14 +54,22 @@ import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.HashMap;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Queue;
+import java.util.concurrent.ExecutionException;
+
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
+
+import com.google.common.base.Preconditions;
 
 /**
  * KMS client <code>KeyProvider</code> implementation.
  */
 @InterfaceAudience.Private
-public class KMSClientProvider extends KeyProvider {
+public class KMSClientProvider extends KeyProvider implements CryptoExtension {
 
   public static final String SCHEME_NAME = "kms";
 
@@ -78,6 +90,73 @@ public class KMSClientProvider extends KeyProvider {
   public static final String TIMEOUT_ATTR = CONFIG_PREFIX + "timeout";
   public static final int DEFAULT_TIMEOUT = 60;
 
+  private final ValueQueue<EncryptedKeyVersion> encKeyVersionQueue;
+
+  private class EncryptedQueueRefiller implements
+    ValueQueue.QueueRefiller<EncryptedKeyVersion> {
+
+    @Override
+    public void fillQueueForKey(String keyName,
+        Queue<EncryptedKeyVersion> keyQueue, int numEKVs) throws IOException {
+      checkNotNull(keyName, "keyName");
+      Map<String, String> params = new HashMap<String, String>();
+      params.put(KMSRESTConstants.EEK_OP, KMSRESTConstants.EEK_GENERATE);
+      params.put(KMSRESTConstants.EEK_NUM_KEYS, "" + numEKVs);
+      URL url = createURL(KMSRESTConstants.KEY_RESOURCE, keyName,
+          KMSRESTConstants.EEK_SUB_RESOURCE, params);
+      HttpURLConnection conn = createConnection(url, HTTP_GET);
+      conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
+      List response = call(conn, null,
+          HttpURLConnection.HTTP_OK, List.class);
+      List<EncryptedKeyVersion> ekvs =
+          parseJSONEncKeyVersion(keyName, response);
+      keyQueue.addAll(ekvs);
+    }
+  }
+
+  public static class KMSEncryptedKeyVersion extends EncryptedKeyVersion {
+    public KMSEncryptedKeyVersion(String keyName, String keyVersionName,
+        byte[] iv, String encryptedVersionName, byte[] keyMaterial) {
+      super(keyName, keyVersionName, iv, new KMSKeyVersion(null, 
+          encryptedVersionName, keyMaterial));
+    }
+  }
+
+  @SuppressWarnings("rawtypes")
+  private static List<EncryptedKeyVersion>
+      parseJSONEncKeyVersion(String keyName, List valueList) {
+    List<EncryptedKeyVersion> ekvs = new LinkedList<EncryptedKeyVersion>();
+    if (!valueList.isEmpty()) {
+      for (Object values : valueList) {
+        Map valueMap = (Map) values;
+
+        String versionName = checkNotNull(
+                (String) valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
+                KMSRESTConstants.VERSION_NAME_FIELD);
+
+        byte[] iv = Base64.decodeBase64(checkNotNull(
+                (String) valueMap.get(KMSRESTConstants.IV_FIELD),
+                KMSRESTConstants.IV_FIELD));
+
+        Map encValueMap = checkNotNull((Map)
+                valueMap.get(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD),
+                KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD);
+
+        String encVersionName = checkNotNull((String)
+                encValueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
+                KMSRESTConstants.VERSION_NAME_FIELD);
+
+        byte[] encKeyMaterial = Base64.decodeBase64(checkNotNull((String)
+                encValueMap.get(KMSRESTConstants.MATERIAL_FIELD),
+                KMSRESTConstants.MATERIAL_FIELD));
+
+        ekvs.add(new KMSEncryptedKeyVersion(keyName, versionName, iv,
+            encVersionName, encKeyMaterial));
+      }
+    }
+    return ekvs;
+  }
+
   private static KeyVersion parseJSONKeyVersion(Map valueMap) {
     KeyVersion keyVersion = null;
     if (!valueMap.isEmpty()) {
@@ -208,6 +287,28 @@ public class KMSClientProvider extends KeyProvider {
     }
     int timeout = conf.getInt(TIMEOUT_ATTR, DEFAULT_TIMEOUT);
     configurator = new TimeoutConnConfigurator(timeout, sslFactory);
+    encKeyVersionQueue =
+        new ValueQueue<KeyProviderCryptoExtension.EncryptedKeyVersion>(
+            conf.getInt(
+                CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE,
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT),
+            conf.getFloat(
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK,
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT),
+            conf.getInt(
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS,
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT),
+            conf.getInt(
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS,
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
+            new EncryptedQueueRefiller());
   }
   }
 
   private String createServiceURL(URL url) throws IOException {
    }
  }

+  @Override
+  public EncryptedKeyVersion generateEncryptedKey(
+      String encryptionKeyName) throws IOException, GeneralSecurityException {
+    try {
+      return encKeyVersionQueue.getNext(encryptionKeyName);
+    } catch (ExecutionException e) {
+      if (e.getCause() instanceof SocketTimeoutException) {
+        throw (SocketTimeoutException)e.getCause();
+      }
+      throw new IOException(e);
+    }
+  }
+
+  @SuppressWarnings("rawtypes")
+  @Override
+  public KeyVersion decryptEncryptedKey(
+      EncryptedKeyVersion encryptedKeyVersion) throws IOException,
+                                                      GeneralSecurityException {
+    checkNotNull(encryptedKeyVersion.getEncryptionKeyVersionName(),
+        "versionName");
+    checkNotNull(encryptedKeyVersion.getEncryptedKeyIv(), "iv");
+    Preconditions.checkArgument(
+        encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
+            .equals(KeyProviderCryptoExtension.EEK),
+        "encryptedKey version name must be '%s', is '%s'",
+        KeyProviderCryptoExtension.EEK,
+        encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
+    );
+    checkNotNull(encryptedKeyVersion.getEncryptedKeyVersion(), "encryptedKey");
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(KMSRESTConstants.EEK_OP, KMSRESTConstants.EEK_DECRYPT);
+    Map<String, Object> jsonPayload = new HashMap<String, Object>();
+    jsonPayload.put(KMSRESTConstants.NAME_FIELD,
+        encryptedKeyVersion.getEncryptionKeyName());
+    jsonPayload.put(KMSRESTConstants.IV_FIELD, Base64.encodeBase64String(
+        encryptedKeyVersion.getEncryptedKeyIv()));
+    jsonPayload.put(KMSRESTConstants.MATERIAL_FIELD, Base64.encodeBase64String(
+            encryptedKeyVersion.getEncryptedKeyVersion().getMaterial()));
+    URL url = createURL(KMSRESTConstants.KEY_VERSION_RESOURCE,
+        encryptedKeyVersion.getEncryptionKeyVersionName(),
+        KMSRESTConstants.EEK_SUB_RESOURCE, params);
+    HttpURLConnection conn = createConnection(url, HTTP_POST);
+    conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
+    Map response =
+        call(conn, jsonPayload, HttpURLConnection.HTTP_OK, Map.class);
+    return parseJSONKeyVersion(response);
+  }
+
  @Override
  public List<KeyVersion> getKeyVersions(String name) throws IOException {
    checkNotEmpty(name, "name");
@@ -570,4 +719,14 @@ public class KMSClientProvider extends KeyProvider {
    // the server should not keep in memory state on behalf of clients either.
  }

+  @Override
+  public void warmUpEncryptedKeys(String... keyNames)
+      throws IOException {
+    try {
+      encKeyVersionQueue.initializeQueuesForKeys(keyNames);
+    } catch (ExecutionException e) {
+      throw new IOException(e);
+    }
+  }
+
}

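A minimal usage sketch of the encrypted-key path added to KMSClientProvider above. It is not part of the patch; how the provider instance is obtained is out of scope here, and the key name is illustrative only.

import java.io.IOException;
import java.security.GeneralSecurityException;

import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;

public class KmsEekSketch {
  // Warm up the cache, take one encrypted key from the ValueQueue-backed cache,
  // then ask the KMS to decrypt it back into plaintext key material.
  static KeyVersion roundTrip(KMSClientProvider kms, String keyName)
      throws IOException, GeneralSecurityException {
    kms.warmUpEncryptedKeys(keyName);                  // pre-fills the EEK cache
    EncryptedKeyVersion eek = kms.generateEncryptedKey(keyName); // usually served from cache
    return kms.decryptEncryptedKey(eek);               // POST to .../keyversion/<v>/_eek?eek_op=decrypt
  }
}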
+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java

@@ -34,10 +34,16 @@ public class KMSRESTConstants {
  public static final String KEY_VERSION_RESOURCE = "keyversion";
  public static final String METADATA_SUB_RESOURCE = "_metadata";
  public static final String VERSIONS_SUB_RESOURCE = "_versions";
+  public static final String EEK_SUB_RESOURCE = "_eek";
  public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion";

  public static final String KEY_OP = "key";
+  public static final String EEK_OP = "eek_op";
+  public static final String EEK_GENERATE = "generate";
+  public static final String EEK_DECRYPT = "decrypt";
+  public static final String EEK_NUM_KEYS = "num_keys";

+  public static final String IV_FIELD = "iv";
  public static final String NAME_FIELD = "name";
  public static final String CIPHER_FIELD = "cipher";
  public static final String LENGTH_FIELD = "length";
@@ -47,6 +53,8 @@ public class KMSRESTConstants {
  public static final String VERSIONS_FIELD = "versions";
  public static final String MATERIAL_FIELD = "material";
  public static final String VERSION_NAME_FIELD = "versionName";
+  public static final String ENCRYPTED_KEY_VERSION_FIELD =
+      "encryptedKeyVersion";

  public static final String ERROR_EXCEPTION_JSON = "exception";
  public static final String ERROR_MESSAGE_JSON = "message";

+ 317 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java

@@ -0,0 +1,317 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.base.Preconditions;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A Utility class that maintains a Queue of entries for a given key. It tries
+ * to ensure that there are always at least <code>numValues</code> entries
+ * available for the client to consume for a particular key.
+ * It also uses an underlying Cache to evict queues for keys that have not been
+ * accessed for a configurable period of time.
+ * Implementing classes are required to implement the
+ * <code>QueueRefiller</code> interface that exposes a method to refill the
+ * queue when it is empty.
+ */
+@InterfaceAudience.Private
+public class ValueQueue <E> {
+
+  /**
+   * QueueRefiller interface a client must implement to use this class
+   */
+  public interface QueueRefiller <E> {
+    /**
+     * Method that has to be implemented by implementing classes to fill the
+     * Queue.
+     * @param keyName Key name
+     * @param keyQueue Queue that needs to be filled
+     * @param numValues number of Values to be added to the queue.
+     * @throws IOException
+     */
+    public void fillQueueForKey(String keyName,
+        Queue<E> keyQueue, int numValues) throws IOException;
+  }
+
+  private static final String REFILL_THREAD =
+      ValueQueue.class.getName() + "_thread";
+
+  private final LoadingCache<String, LinkedBlockingQueue<E>> keyQueues;
+  private final ThreadPoolExecutor executor;
+  private final UniqueKeyBlockingQueue queue = new UniqueKeyBlockingQueue();
+  private final QueueRefiller<E> refiller;
+  private final SyncGenerationPolicy policy;
+
+  private final int numValues;
+  private final float lowWatermark;
+
+  /**
+   * A <code>Runnable</code> which takes a string name.
+   */
+  private abstract static class NamedRunnable implements Runnable {
+    final String name;
+    private NamedRunnable(String keyName) {
+      this.name = keyName;
+    }
+  }
+
+  /**
+   * This is the backing blocking queue used in conjunction with the
+   * <code>ThreadPoolExecutor</code> used by the <code>ValueQueue</code>. This
+   * Queue accepts a task only if the task is not currently in the process
+   * of being run by a thread which is implied by the presence of the key
+   * in the <code>keysInProgress</code> set.
+   *
+   * NOTE: Only methods that are explicitly called by the
+   * <code>ThreadPoolExecutor</code> need to be over-ridden.
+   */
+  private static class UniqueKeyBlockingQueue extends
+      LinkedBlockingQueue<Runnable> {
+
+    private static final long serialVersionUID = -2152747693695890371L;
+    private HashSet<String> keysInProgress = new HashSet<String>();
+
+    @Override
+    public synchronized void put(Runnable e) throws InterruptedException {
+      if (keysInProgress.add(((NamedRunnable)e).name)) {
+        super.put(e);
+      }
+    }
+
+    @Override
+    public Runnable take() throws InterruptedException {
+      Runnable k = super.take();
+      if (k != null) {
+        keysInProgress.remove(((NamedRunnable)k).name);
+      }
+      return k;
+    }
+
+    @Override
+    public Runnable poll(long timeout, TimeUnit unit)
+        throws InterruptedException {
+      Runnable k = super.poll(timeout, unit);
+      if (k != null) {
+        keysInProgress.remove(((NamedRunnable)k).name);
+      }
+      return k;
+    }
+
+  }
+
+  /**
+   * Policy to decide how many values to return to client when client asks for
+   * "n" values and Queue is empty.
+   * This decides how many values to return when client calls "getAtMost"
+   */
+  public static enum SyncGenerationPolicy {
+    ATLEAST_ONE, // Return atleast 1 value
+    LOW_WATERMARK, // Return min(n, lowWatermark * numValues) values
+    ALL // Return n values
+  }
+
+  /**
+   * Constructor takes the following tunable configuration parameters
+   * @param numValues The number of values cached in the Queue for a
+   *    particular key.
+   * @param lowWatermark The ratio of (number of current entries/numValues)
+   *    below which the <code>fillQueueForKey()</code> function will be
+   *    invoked to fill the Queue.
+   * @param expiry Expiry time after which the Key and associated Queue are
+   *    evicted from the cache.
+   * @param numFillerThreads Number of threads to use for the filler thread
+   * @param policy The SyncGenerationPolicy to use when client
+   *    calls "getAtMost"
+   * @param refiller implementation of the QueueRefiller
+   */
+  public ValueQueue(final int numValues, final float lowWatermark,
+      long expiry, int numFillerThreads, SyncGenerationPolicy policy,
+      final QueueRefiller<E> refiller) {
+    Preconditions.checkArgument(numValues > 0, "\"numValues\" must be > 0");
+    Preconditions.checkArgument(((lowWatermark > 0)&&(lowWatermark <= 1)),
+        "\"lowWatermark\" must be > 0 and <= 1");
+    Preconditions.checkArgument(expiry > 0, "\"expiry\" must be > 0");
+    Preconditions.checkArgument(numFillerThreads > 0,
+        "\"numFillerThreads\" must be > 0");
+    Preconditions.checkNotNull(policy, "\"policy\" must not be null");
+    this.refiller = refiller;
+    this.policy = policy;
+    this.numValues = numValues;
+    this.lowWatermark = lowWatermark;
+    keyQueues = CacheBuilder.newBuilder()
+            .expireAfterAccess(expiry, TimeUnit.MILLISECONDS)
+            .build(new CacheLoader<String, LinkedBlockingQueue<E>>() {
+                  @Override
+                  public LinkedBlockingQueue<E> load(String keyName)
+                      throws Exception {
+                    LinkedBlockingQueue<E> keyQueue =
+                        new LinkedBlockingQueue<E>();
+                    refiller.fillQueueForKey(keyName, keyQueue,
+                        (int)(lowWatermark * numValues));
+                    return keyQueue;
+                  }
+                });
+
+    executor =
+        new ThreadPoolExecutor(numFillerThreads, numFillerThreads, 0L,
+            TimeUnit.MILLISECONDS, queue, new ThreadFactoryBuilder()
+                .setDaemon(true)
+                .setNameFormat(REFILL_THREAD).build());
+    // To ensure all requests are first queued, make coreThreads = maxThreads
+    // and pre-start all the Core Threads.
+    executor.prestartAllCoreThreads();
+  }
+
+  public ValueQueue(final int numValues, final float lowWaterMark, long expiry,
+      int numFillerThreads, QueueRefiller<E> fetcher) {
+    this(numValues, lowWaterMark, expiry, numFillerThreads,
+        SyncGenerationPolicy.ALL, fetcher);
+  }
+
+  /**
+   * Initializes the Value Queues for the provided keys by calling the
+   * fill Method with "numInitValues" values
+   * @param keyNames Array of key Names
+   * @throws ExecutionException
+   */
+  public void initializeQueuesForKeys(String... keyNames)
+      throws ExecutionException {
+    for (String keyName : keyNames) {
+      keyQueues.get(keyName);
+    }
+  }
+
+  /**
+   * This removes the value currently at the head of the Queue for the
+   * provided key. Will immediately fire the Queue filler function if key
+   * does not exist.
+   * If Queue exists but all values are drained, It will ask the generator
+   * function to add 1 value to Queue and then drain it.
+   * @param keyName String key name
+   * @return E the next value in the Queue
+   * @throws IOException
+   * @throws ExecutionException
+   */
+  public E getNext(String keyName)
+      throws IOException, ExecutionException {
+    return getAtMost(keyName, 1).get(0);
+  }
+
+  /**
+   * This removes the "num" values currently at the head of the Queue for the
+   * provided key. Will immediately fire the Queue filler function if key
+   * does not exist
+   * How many values are actually returned is governed by the
+   * <code>SyncGenerationPolicy</code> specified by the user.
+   * @param keyName String key name
+   * @param num Minimum number of values to return.
+   * @return List<E> values returned
+   * @throws IOException
+   * @throws ExecutionException
+   */
+  public List<E> getAtMost(String keyName, int num) throws IOException,
+      ExecutionException {
+    LinkedBlockingQueue<E> keyQueue = keyQueues.get(keyName);
+    // Using poll to avoid race condition..
+    LinkedList<E> ekvs = new LinkedList<E>();
+    try {
+      for (int i = 0; i < num; i++) {
+        E val = keyQueue.poll();
+        // If queue is empty now, Based on the provided SyncGenerationPolicy,
+        // figure out how many new values need to be generated synchronously
+        if (val == null) {
+          // Synchronous call to get remaining values
+          int numToFill = 0;
+          switch (policy) {
+          case ATLEAST_ONE:
+            numToFill = (ekvs.size() < 1) ? 1 : 0;
+            break;
+          case LOW_WATERMARK:
+            numToFill =
+                Math.min(num, (int) (lowWatermark * numValues)) - ekvs.size();
+            break;
+          case ALL:
+            numToFill = num - ekvs.size();
+            break;
+          }
+          // Synchronous fill if not enough values found
+          if (numToFill > 0) {
+            refiller.fillQueueForKey(keyName, ekvs, numToFill);
+          }
+          // Asynch task to fill > lowWatermark
+          if (i <= (int) (lowWatermark * numValues)) {
+            submitRefillTask(keyName, keyQueue);
+          }
+          return ekvs;
+        }
+        ekvs.add(val);
+      }
+    } catch (Exception e) {
+      throw new IOException("Exception while contacting value generator ", e);
+    }
+    return ekvs;
+  }
+
+  private void submitRefillTask(final String keyName,
+      final Queue<E> keyQueue) throws InterruptedException {
+    // The submit/execute method of the ThreadPoolExecutor is bypassed and
+    // the Runnable is directly put in the backing BlockingQueue so that we
+    // can control exactly how the runnable is inserted into the queue.
+    queue.put(
+        new NamedRunnable(keyName) {
+          @Override
+          public void run() {
+            int cacheSize = numValues;
+            int threshold = (int) (lowWatermark * (float) cacheSize);
+            // Need to ensure that only one refill task per key is executed
+            try {
+              if (keyQueue.size() < threshold) {
+                refiller.fillQueueForKey(name, keyQueue,
+                    cacheSize - keyQueue.size());
+              }
+            } catch (final Exception e) {
+              throw new RuntimeException(e);
+            }
+          }
+        }
+        );
+  }
+
+  /**
+   * Cleanly shutdown
+   */
+  public void shutdown() {
+    executor.shutdownNow();
+  }
+
+}

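A self-contained sketch (not from the patch) of how a QueueRefiller is wired to a ValueQueue; the counter-based refiller stands in for the remote EEK-generation call that KMSClientProvider's EncryptedQueueRefiller performs, and all names below are illustrative.

import java.io.IOException;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.crypto.key.kms.ValueQueue;
import org.apache.hadoop.crypto.key.kms.ValueQueue.QueueRefiller;
import org.apache.hadoop.crypto.key.kms.ValueQueue.SyncGenerationPolicy;

public class ValueQueueSketch {
  public static void main(String[] args) throws Exception {
    final AtomicLong counter = new AtomicLong();
    // Toy refiller: in KMSClientProvider this would be a remote "generate EEK" call.
    QueueRefiller<Long> refiller = new QueueRefiller<Long>() {
      @Override
      public void fillQueueForKey(String keyName, Queue<Long> keyQueue,
          int numValues) throws IOException {
        for (int i = 0; i < numValues; i++) {
          keyQueue.add(counter.incrementAndGet());
        }
      }
    };
    // 10 cached values per key, refill below 30%, 60s expiry, 1 filler thread.
    ValueQueue<Long> vq = new ValueQueue<Long>(10, 0.3f, 60000, 1,
        SyncGenerationPolicy.ALL, refiller);
    vq.initializeQueuesForKeys("k1");          // pre-populates the queue for "k1"
    System.out.println(vq.getNext("k1"));      // one value; may trigger an async refill
    System.out.println(vq.getAtMost("k1", 5)); // up to five more values
    vq.shutdown();
  }
}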
+ 33 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -250,6 +250,12 @@ public class CommonConfigurationKeysPublic {
  public static final long HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT =
    300;
  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String  HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS =
+    "hadoop.security.groups.negative-cache.secs";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final long HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT =
+    30;
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
  public static final String HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS =
    "hadoop.security.groups.cache.warn.after.ms";
  public static final long HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT =
@@ -285,5 +291,32 @@ public class CommonConfigurationKeysPublic {
  /** Class to override Impersonation provider */
  public static final String  HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS =
    "hadoop.security.impersonation.provider.class";
+
+  //  <!--- KMSClientProvider configurations -->
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String KMS_CLIENT_ENC_KEY_CACHE_SIZE =
+      "hadoop.security.kms.client.encrypted.key.cache.size";
+  /** Default value for KMS_CLIENT_ENC_KEY_CACHE_SIZE */
+  public static final int KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT = 500;
+
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK =
+      "hadoop.security.kms.client.encrypted.key.cache.low-watermark";
+  /** Default value for KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK */
+  public static final float KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT =
+      0.3f;
+
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS =
+      "hadoop.security.kms.client.encrypted.key.cache.num.refill.threads";
+  /** Default value for KMS_CLIENT_ENC_KEY_NUM_REFILL_THREADS */
+  public static final int KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT =
+      2;
+
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS =
+      "hadoop.security.kms.client.encrypted.key.cache.expiry";
+  /** Default value for KMS_CLIENT_ENC_KEY_CACHE_EXPIRY (12 hrs)*/
+  public static final int KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT = 43200000;
}


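The new keys can be tuned in core-site.xml or programmatically; a small sketch with example values (the numbers are arbitrary, not recommendations):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

public class KmsCacheConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt(CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE, 1000);
    conf.setFloat(
        CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK, 0.5f);
    conf.setInt(
        CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS, 4);
    conf.setInt(
        CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS, 3600000);
    // Effective value, falling back to the compiled-in default of 500.
    System.out.println(conf.getInt(
        CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE,
        CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT));
  }
}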
+ 7 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java

@@ -128,6 +128,8 @@ public class Stat extends Shell {
           " link " + original);
           " link " + original);
    }
    // 6,symbolic link,6,1373584236,1373584236,lrwxrwxrwx,andrew,andrew,`link' -> `target'
+    // OR
+    // 6,symbolic link,6,1373584236,1373584236,lrwxrwxrwx,andrew,andrew,'link' -> 'target'
    StringTokenizer tokens = new StringTokenizer(line, ",");
    try {
      long length = Long.parseLong(tokens.nextToken());
@@ -147,18 +149,17 @@ public class Stat extends Shell {
      String group = tokens.nextToken();
      String symStr = tokens.nextToken();
      // 'notalink'
-      // 'link' -> `target'
+      // `link' -> `target' OR 'link' -> 'target'
      // '' -> ''
      Path symlink = null;
-      StringTokenizer symTokens = new StringTokenizer(symStr, "`");
-      symTokens.nextToken();
+      String parts[] = symStr.split(" -> ");      
      try {
-        String target = symTokens.nextToken();
-        target = target.substring(0, target.length()-1);
+        String target = parts[1];
+        target = target.substring(1, target.length()-1);
        if (!target.isEmpty()) {
          symlink = new Path(target);
        }
-      } catch (NoSuchElementException e) {
+      } catch (ArrayIndexOutOfBoundsException e) {
        // null if not a symlink
      }
      // Set stat

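The Stat change above switches from tokenizing on the backquote to splitting on " -> ", so both GNU quoting styles parse. A standalone sketch of the same logic (class and method names are made up):

public class StatSymlinkParseSketch {
  // Split on " -> " and strip one surrounding quote character from the target.
  static String parseTarget(String symStr) {
    String[] parts = symStr.split(" -> ");
    try {
      String target = parts[1];
      return target.substring(1, target.length() - 1);
    } catch (ArrayIndexOutOfBoundsException e) {
      return null; // not a symlink
    }
  }

  public static void main(String[] args) {
    System.out.println(parseTarget("`link' -> `target'")); // older GNU stat quoting
    System.out.println(parseTarget("'link' -> 'target'")); // newer coreutils quoting
    System.out.println(parseTarget("'notalink'"));         // null
  }
}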
+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java

@@ -50,6 +50,7 @@ import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.s3.S3Exception;
@@ -225,6 +226,7 @@ public class NativeS3FileSystem extends FileSystem {
    private OutputStream backupStream;
    private MessageDigest digest;
    private boolean closed;
+    private LocalDirAllocator lDirAlloc;

    public NativeS3FsOutputStream(Configuration conf,
        NativeFileSystemStore store, String key, Progressable progress,
@@ -246,11 +248,10 @@ public class NativeS3FileSystem extends FileSystem {
    }

    private File newBackupFile() throws IOException {
-      File dir = new File(conf.get("fs.s3.buffer.dir"));
-      if (!dir.mkdirs() && !dir.exists()) {
-        throw new IOException("Cannot create S3 buffer directory: " + dir);
+      if (lDirAlloc == null) {
+        lDirAlloc = new LocalDirAllocator("fs.s3.buffer.dir");
      }
-      File result = File.createTempFile("output-", ".tmp", dir);
+      File result = lDirAlloc.createTmpFileForWrite("output-", LocalDirAllocator.SIZE_UNKNOWN, conf);
      result.deleteOnExit();
      return result;
    }

+ 34 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java

@@ -37,6 +37,8 @@ import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
@@ -279,6 +281,38 @@ class ChRootedFs extends AbstractFileSystem {
    myFs.setTimes(fullPath(f), mtime, atime);
  }

+  @Override
+  public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
+      throws IOException {
+    myFs.modifyAclEntries(fullPath(path), aclSpec);
+  }
+
+  @Override
+  public void removeAclEntries(Path path, List<AclEntry> aclSpec)
+      throws IOException {
+    myFs.removeAclEntries(fullPath(path), aclSpec);
+  }
+
+  @Override
+  public void removeDefaultAcl(Path path) throws IOException {
+    myFs.removeDefaultAcl(fullPath(path));
+  }
+
+  @Override
+  public void removeAcl(Path path) throws IOException {
+    myFs.removeAcl(fullPath(path));
+  }
+
+  @Override
+  public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
+    myFs.setAcl(fullPath(path), aclSpec);
+  }
+
+  @Override
+  public AclStatus getAclStatus(Path path) throws IOException {
+    return myFs.getAclStatus(fullPath(path));
+  }
+
  @Override
  public void setVerifyChecksum(final boolean verifyChecksum) 
      throws IOException, UnresolvedLinkException {

+ 42 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -50,6 +50,7 @@ import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -871,5 +872,46 @@ public class ViewFileSystem extends FileSystem {
    public short getDefaultReplication(Path f) {
      throw new NotInMountpointException(f, "getDefaultReplication");
    }
+
+    @Override
+    public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
+        throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("modifyAclEntries", path);
+    }
+
+    @Override
+    public void removeAclEntries(Path path, List<AclEntry> aclSpec)
+        throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("removeAclEntries", path);
+    }
+
+    @Override
+    public void removeDefaultAcl(Path path) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("removeDefaultAcl", path);
+    }
+
+    @Override
+    public void removeAcl(Path path) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("removeAcl", path);
+    }
+
+    @Override
+    public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("setAcl", path);
+    }
+
+    @Override
+    public AclStatus getAclStatus(Path path) throws IOException {
+      checkPathIsSlash(path);
+      return new AclStatus.Builder().owner(ugi.getUserName())
+          .group(ugi.getGroupNames()[0])
+          .addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
+          .stickyBit(false).build();
+    }
  }
}

+ 89 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -49,6 +49,9 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.local.LocalConfigKeys;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -603,6 +606,51 @@ public class ViewFs extends AbstractFileSystem {
    return true;
  }

+  @Override
+  public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
+      throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.modifyAclEntries(res.remainingPath, aclSpec);
+  }
+
+  @Override
+  public void removeAclEntries(Path path, List<AclEntry> aclSpec)
+      throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.removeAclEntries(res.remainingPath, aclSpec);
+  }
+
+  @Override
+  public void removeDefaultAcl(Path path)
+      throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.removeDefaultAcl(res.remainingPath);
+  }
+
+  @Override
+  public void removeAcl(Path path)
+      throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.removeAcl(res.remainingPath);
+  }
+
+  @Override
+  public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.setAcl(res.remainingPath, aclSpec);
+  }
+
+  @Override
+  public AclStatus getAclStatus(Path path) throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    return res.targetFileSystem.getAclStatus(res.remainingPath);
+  }
   
   
   
   
  /*
@@ -832,5 +880,46 @@ public class ViewFs extends AbstractFileSystem {
        throws AccessControlException {
      throw readOnlyMountTable("setVerifyChecksum", "");   
    }
+
+    @Override
+    public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
+        throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("modifyAclEntries", path);
+    }
+
+    @Override
+    public void removeAclEntries(Path path, List<AclEntry> aclSpec)
+        throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("removeAclEntries", path);
+    }
+
+    @Override
+    public void removeDefaultAcl(Path path) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("removeDefaultAcl", path);
+    }
+
+    @Override
+    public void removeAcl(Path path) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("removeAcl", path);
+    }
+
+    @Override
+    public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("setAcl", path);
+    }
+
+    @Override
+    public AclStatus getAclStatus(Path path) throws IOException {
+      checkPathIsSlash(path);
+      return new AclStatus.Builder().owner(ugi.getUserName())
+          .group(ugi.getGroupNames()[0])
+          .addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
+          .stickyBit(false).build();
+    }
  }
}

+ 13 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java

@@ -292,9 +292,7 @@ public class Text extends BinaryComparable
  @Override
  public void readFields(DataInput in) throws IOException {
    int newLength = WritableUtils.readVInt(in);
-    setCapacity(newLength, false);
-    in.readFully(bytes, 0, newLength);
-    length = newLength;
+    readWithKnownLength(in, newLength);
  }

  public void readFields(DataInput in, int maxLength) throws IOException {
@@ -306,9 +304,7 @@ public class Text extends BinaryComparable
      throw new IOException("tried to deserialize " + newLength +
          " bytes of data, but maxLength = " + maxLength);
    }
-    setCapacity(newLength, false);
-    in.readFully(bytes, 0, newLength);
-    length = newLength;
+    readWithKnownLength(in, newLength);
  }

  /** Skips over one Text in the input. */
@@ -317,6 +313,17 @@ public class Text extends BinaryComparable
    WritableUtils.skipFully(in, length);
  }

+  /**
+   * Read a Text object whose length is already known.
+   * This allows creating Text from a stream which uses a different serialization
+   * format.
+   */
+  public void readWithKnownLength(DataInput in, int len) throws IOException {
+    setCapacity(len, false);
+    in.readFully(bytes, 0, len);
+    length = len;
+  }
+
  /** serialize
   * write this object to out
   * length uses zero-compressed encoding

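A sketch of the new readWithKnownLength hook: the byte length comes from some other framing (here a plain 4-byte int, purely for illustration) rather than Text's own vint prefix.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.Text;

public class ReadKnownLengthSketch {
  public static void main(String[] args) throws IOException {
    byte[] utf8 = "hello".getBytes("UTF-8");
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.writeInt(utf8.length);   // external framing, not Text's vint
    out.write(utf8);
    out.close();

    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray()));
    int len = in.readInt();
    Text t = new Text();
    t.readWithKnownLength(in, len); // method added in this patch
    System.out.println(t);          // prints "hello"
  }
}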
+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java

@@ -100,7 +100,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
  @Override
  public CompressionOutputStream createOutputStream(OutputStream out)
      throws IOException {
-    return createOutputStream(out, createCompressor());
+    return CompressionCodec.Util.
+        createOutputStreamWithCodecPool(this, conf, out);
  }

  /**
@@ -153,7 +154,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
  @Override
  public CompressionInputStream createInputStream(InputStream in)
      throws IOException {
-    return createInputStream(in, createDecompressor());
+    return CompressionCodec.Util.
+        createInputStreamWithCodecPool(this, conf, in);
  }

  /**

+ 55 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodec.java

@@ -24,6 +24,7 @@ import java.io.OutputStream;
 
 
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;

/**
 * This class encapsulates a streaming compression/decompression pair.
@@ -113,4 +114,58 @@ public interface CompressionCodec {
   * @return the extension including the '.'
   */
  String getDefaultExtension();
+
+  static class Util {
+    /**
+     * Create an output stream with a codec taken from the global CodecPool.
+     *
+     * @param codec       The codec to use to create the output stream.
+     * @param conf        The configuration to use if we need to create a new codec.
+     * @param out         The output stream to wrap.
+     * @return            The new output stream
+     * @throws IOException
+     */
+    static CompressionOutputStream createOutputStreamWithCodecPool(
+        CompressionCodec codec, Configuration conf, OutputStream out)
+        throws IOException {
+      Compressor compressor = CodecPool.getCompressor(codec, conf);
+      CompressionOutputStream stream = null;
+      try {
+        stream = codec.createOutputStream(out, compressor);
+      } finally {
+        if (stream == null) {
+          CodecPool.returnCompressor(compressor);
+        } else {
+          stream.setTrackedCompressor(compressor);
+        }
+      }
+      return stream;
+    }
+
+    /**
+     * Create an input stream with a codec taken from the global CodecPool.
+     *
+     * @param codec       The codec to use to create the input stream.
+     * @param conf        The configuration to use if we need to create a new codec.
+     * @param in          The input stream to wrap.
+     * @return            The new input stream
+     * @throws IOException
+     */
+    static CompressionInputStream createInputStreamWithCodecPool(
+        CompressionCodec codec,  Configuration conf, InputStream in)
+          throws IOException {
+      Decompressor decompressor = CodecPool.getDecompressor(codec);
+      CompressionInputStream stream = null;
+      try {
+        stream = codec.createInputStream(in, decompressor);
+      } finally {
+        if (stream == null) {
+          CodecPool.returnDecompressor(decompressor);
+        } else {
+          stream.setTrackedDecompressor(decompressor);
+        }
+      }
+      return stream;
+    }
+  }
 }
}
+ 10 - 0
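With the Util helpers above, the one-argument createOutputStream/createInputStream overloads borrow a (de)compressor from CodecPool and hand it back when the stream is closed. A rough round-trip sketch using DefaultCodec (the class name PooledCodecSketch is made up):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class PooledCodecSketch {
  public static void main(String[] args) throws IOException {
    DefaultCodec codec = ReflectionUtils.newInstance(DefaultCodec.class,
        new Configuration());
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    CompressionOutputStream out = codec.createOutputStream(buf);
    out.write("hello".getBytes("UTF-8"));
    out.close(); // also returns the tracked Compressor to CodecPool

    CompressionInputStream in = codec.createInputStream(
        new ByteArrayInputStream(buf.toByteArray()));
    byte[] b = new byte[5];
    IOUtils.readFully(in, b, 0, b.length);
    in.close(); // also returns the tracked Decompressor to CodecPool
    System.out.println(new String(b, "UTF-8"));
  }
}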
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java

@@ -41,6 +41,8 @@ public abstract class CompressionInputStream extends InputStream implements Seek
   protected final InputStream in;
  protected final InputStream in;
  protected long maxAvailableData = 0L;

+
   /**
  /**
   * Create a compression input stream that reads
   * the decompressed bytes from the given stream.
   @Override
  @Override
  public void close() throws IOException {
    in.close();
+      CodecPool.returnDecompressor(trackedDecompressor);
+      trackedDecompressor = null;
+    }
   }
  }

  /**
   public boolean seekToNewSource(long targetPos) throws UnsupportedOperationException {
  public boolean seekToNewSource(long targetPos) throws UnsupportedOperationException {
    throw new UnsupportedOperationException();
  }
+  void setTrackedDecompressor(Decompressor decompressor) {
+    trackedDecompressor = decompressor;
+  }
 }
}
+ 16 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java

@@ -34,7 +34,13 @@ public abstract class CompressionOutputStream extends OutputStream {
    * The output stream to be compressed. 
   * The output stream to be compressed. 
   */
  protected final OutputStream out;
+
+  /**
+   * If non-null, this is the Compressor object that we should call
+   * CodecPool#returnCompressor on when this stream is closed.
+   */
+  private Compressor trackedCompressor;
+
  /**
   * Create a compression output stream that writes
   * the compressed bytes to the given stream.
@@ -43,11 +49,19 @@ public abstract class CompressionOutputStream extends OutputStream {
  protected CompressionOutputStream(OutputStream out) {
    this.out = out;
  }
-  
+
+  void setTrackedCompressor(Compressor compressor) {
+    trackedCompressor = compressor;
+  }
+
  @Override
  public void close() throws IOException {
    finish();
    out.close();
+    if (trackedCompressor != null) {
+      CodecPool.returnCompressor(trackedCompressor);
+      trackedCompressor = null;
+    }
  }

  @Override

+ 4 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java

@@ -51,14 +51,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
  @Override
  public CompressionOutputStream createOutputStream(OutputStream out) 
  throws IOException {
-    // This may leak memory if called in a loop. The createCompressor() call
-    // may cause allocation of an untracked direct-backed buffer if native
-    // libs are being used (even if you close the stream).  A Compressor
-    // object should be reused between successive calls.
-    LOG.warn("DefaultCodec.createOutputStream() may leak memory. "
-        + "Create a compressor first.");
-    return new CompressorStream(out, createCompressor(), 
-                                conf.getInt("io.file.buffer.size", 4*1024));
+    return CompressionCodec.Util.
+        createOutputStreamWithCodecPool(this, conf, out);
  }

  @Override
@@ -82,8 +76,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
  @Override
  public CompressionInputStream createInputStream(InputStream in) 
  throws IOException {
-    return new DecompressorStream(in, createDecompressor(),
-                                  conf.getInt("io.file.buffer.size", 4*1024));
+    return CompressionCodec.Util.
+        createInputStreamWithCodecPool(this, conf, in);
  }

  @Override

+ 8 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java

@@ -159,10 +159,11 @@ public class GzipCodec extends DefaultCodec {
  @Override
  public CompressionOutputStream createOutputStream(OutputStream out) 
    throws IOException {
-    return (ZlibFactory.isNativeZlibLoaded(conf)) ?
-               new CompressorStream(out, createCompressor(),
-                                    conf.getInt("io.file.buffer.size", 4*1024)) :
-               new GzipOutputStream(out);
+    if (!ZlibFactory.isNativeZlibLoaded(conf)) {
+      return new GzipOutputStream(out);
+    }
+    return CompressionCodec.Util.
+        createOutputStreamWithCodecPool(this, conf, out);
  }

  @Override
@@ -192,8 +193,9 @@ public class GzipCodec extends DefaultCodec {
 
 
  @Override
  public CompressionInputStream createInputStream(InputStream in)
-  throws IOException {
-    return createInputStream(in, null);
+      throws IOException {
+    return CompressionCodec.Util.
+        createInputStreamWithCodecPool(this, conf, in);
  }

  @Override

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java

@@ -84,7 +84,8 @@ public class Lz4Codec implements Configurable, CompressionCodec {
  @Override
  public CompressionOutputStream createOutputStream(OutputStream out)
      throws IOException {
-    return createOutputStream(out, createCompressor());
+    return CompressionCodec.Util.
+        createOutputStreamWithCodecPool(this, conf, out);
  }

  /**
@@ -157,7 +158,8 @@ public class Lz4Codec implements Configurable, CompressionCodec {
  @Override
  public CompressionInputStream createInputStream(InputStream in)
      throws IOException {
-    return createInputStream(in, createDecompressor());
+    return CompressionCodec.Util.
+        createInputStreamWithCodecPool(this, conf, in);
  }

  /**

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java

@@ -95,7 +95,8 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
  @Override
  public CompressionOutputStream createOutputStream(OutputStream out)
      throws IOException {
-    return createOutputStream(out, createCompressor());
+    return CompressionCodec.Util.
+        createOutputStreamWithCodecPool(this, conf, out);
  }

  /**
@@ -158,7 +159,8 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
  @Override
  public CompressionInputStream createInputStream(InputStream in)
      throws IOException {
-    return createInputStream(in, createDecompressor());
+    return CompressionCodec.Util.
+        createInputStreamWithCodecPool(this, conf, in);
  }

  /**

+ 20 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -599,24 +599,35 @@ public class ProtobufRpcEngine implements RpcEngine {
            .mergeFrom(request.theRequestRead).build();

        Message result;
+        long startTime = Time.now();
+        int qTime = (int) (startTime - receiveTime);
+        Exception exception = null;
        try {
-          long startTime = Time.now();
          server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
          result = service.callBlockingMethod(methodDescriptor, null, param);
+        } catch (ServiceException e) {
+          exception = (Exception) e.getCause();
+          throw (Exception) e.getCause();
+        } catch (Exception e) {
+          exception = e;
+          throw e;
+        } finally {
           int processingTime = (int) (Time.now() - startTime);
          int processingTime = (int) (Time.now() - startTime);
           if (LOG.isDebugEnabled()) {
          if (LOG.isDebugEnabled()) {
-                      " procesingTime= " + processingTime);
+            String msg = "Served: " + methodName + " queueTime= " + qTime +
+                " procesingTime= " + processingTime;
+            if (exception != null) {
+              msg += " exception= " + exception.getClass().getSimpleName();
+            }
+            LOG.debug(msg);
          }
+          String detailedMetricsName = (exception == null) ?
+              methodName :
+              exception.getClass().getSimpleName();
          server.rpcMetrics.addRpcQueueTime(qTime);
          server.rpcMetrics.addRpcProcessingTime(processingTime);
-          server.rpcDetailedMetrics.addProcessingTime(methodName,
+          server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName,
              processingTime);
-        } catch (ServiceException e) {
-          throw (Exception) e.getCause();
-        } catch (Exception e) {
-          throw e;
         }
        }
        return new RpcResponseWrapper(result);
      }

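This change, and the matching WritableRpcEngine change further down, record queue and processing time in a finally block and key the detailed metric on the exception class when a call fails. The shape of that pattern, reduced to a generic sketch (interface and names are made up):

import java.util.concurrent.Callable;

public class TimedCallSketch {
  interface Metrics { void add(String name, int millis); }

  static Object invoke(Metrics metrics, Callable<Object> call) throws Exception {
    long startTime = System.currentTimeMillis();
    Exception exception = null;
    try {
      return call.call();
    } catch (Exception e) {
      exception = e;            // remember what failed, then rethrow
      throw e;
    } finally {
      int processingTime = (int) (System.currentTimeMillis() - startTime);
      // Successful calls are recorded under the method name in the real code;
      // failures are recorded under the exception's simple class name.
      String name = (exception == null)
          ? "success" : exception.getClass().getSimpleName();
      metrics.add(name, processingTime);
    }
  }
}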
+ 4 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -355,8 +355,8 @@ public abstract class Server {
  private int readThreads;                        // number of read threads
  private int readerPendingConnectionQueue;         // number of connections to queue per read thread
  private Class<? extends Writable> rpcRequestClass;   // class used for deserializing the rpc request
-  protected RpcMetrics rpcMetrics;
-  protected RpcDetailedMetrics rpcDetailedMetrics;
+  final protected RpcMetrics rpcMetrics;
+  final protected RpcDetailedMetrics rpcDetailedMetrics;
   
   
  private Configuration conf;
  private String portRangeConfig = null;
@@ -2494,12 +2494,8 @@ public abstract class Server {
    listener.doStop();
    responder.interrupt();
    notifyAll();
-    if (this.rpcMetrics != null) {
-      this.rpcMetrics.shutdown();
-    }
-    if (this.rpcDetailedMetrics != null) {
-      this.rpcDetailedMetrics.shutdown();
-    }
+    this.rpcMetrics.shutdown();
+    this.rpcDetailedMetrics.shutdown();
  }

  /** Wait for the server to be stopped.

+ 26 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java

@@ -471,37 +471,29 @@ public class WritableRpcEngine implements RpcEngine {
           
           
 
 
          // Invoke the protocol method
+       long startTime = Time.now();
+       int qTime = (int) (startTime-receivedTime);
+       Exception exception = null;
        try {
        try {
-          long startTime = Time.now();
-          Method method = 
+          Method method =
              protocolImpl.protocolClass.getMethod(call.getMethodName(),
              call.getParameterClasses());
          method.setAccessible(true);
          server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
          Object value = 
              method.invoke(protocolImpl.protocolImpl, call.getParameters());
-          int processingTime = (int) (Time.now() - startTime);
-          int qTime = (int) (startTime-receivedTime);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Served: " + call.getMethodName() +
-                      " queueTime= " + qTime +
-                      " procesingTime= " + processingTime);
-          }
-          server.rpcMetrics.addRpcQueueTime(qTime);
-          server.rpcMetrics.addRpcProcessingTime(processingTime);
-          server.rpcDetailedMetrics.addProcessingTime(call.getMethodName(),
-                                               processingTime);
           if (server.verbose) log("Return: "+value);
          if (server.verbose) log("Return: "+value);
           return new ObjectWritable(method.getReturnType(), value);
          return new ObjectWritable(method.getReturnType(), value);

        } catch (InvocationTargetException e) {
          Throwable target = e.getTargetException();
          if (target instanceof IOException) {
             throw (IOException)target;
            throw (IOException)target;
          } else {
            IOException ioe = new IOException(target.toString());
            ioe.setStackTrace(target.getStackTrace());
             throw ioe;
            throw ioe;
          }
         } catch (Throwable e) {
@@ -510,8 +502,27 @@ public class WritableRpcEngine implements RpcEngine {
          }
          IOException ioe = new IOException(e.toString());
          ioe.setStackTrace(e.getStackTrace());
+          exception = ioe;
          throw ioe;
-        }
+        } finally {
+         int processingTime = (int) (Time.now() - startTime);
+         if (LOG.isDebugEnabled()) {
+           String msg = "Served: " + call.getMethodName() +
+               " queueTime= " + qTime +
+               " procesingTime= " + processingTime;
+           if (exception != null) {
+             msg += " exception= " + exception.getClass().getSimpleName();
+           }
+           LOG.debug(msg);
+         }
+         String detailedMetricsName = (exception == null) ?
+             call.getMethodName() :
+             exception.getClass().getSimpleName();
+         server.rpcMetrics.addRpcQueueTime(qTime);
+         server.rpcMetrics.addRpcProcessingTime(processingTime);
+         server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName,
+             processingTime);
+       }
      }
    }
  }

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java

@@ -54,6 +54,12 @@ public abstract class MetricsSystem implements MetricsSystemMXBean {
   */
  public abstract <T> T register(String name, String desc, T source);

+  /**
+   * Unregister a metrics source
+   * @param name of the source. This is the name you use to call register()
+   */
+  public abstract void unregisterSource(String name);
+
   /**
   /**
    * Register a metrics source (deriving name and description from the object)
    * Register a metrics source (deriving name and description from the object)
    * @param <T>   the actual type of the source object
    * @param <T>   the actual type of the source object

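Given the unregisterSource(String) contract added above, a caller pairs it with register() using the same name. A hedged sketch (MySource is a hypothetical source; DefaultMetricsSystem.instance() is assumed as the usual entry point):

    import org.apache.hadoop.metrics2.MetricsCollector;
    import org.apache.hadoop.metrics2.MetricsSource;
    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

    public class UnregisterSourceExample {
      // A trivial source; a real one would add gauges/counters to the record.
      static class MySource implements MetricsSource {
        @Override
        public void getMetrics(MetricsCollector collector, boolean all) {
          collector.addRecord("MySource");
        }
      }

      public static void main(String[] args) {
        MetricsSystem ms = DefaultMetricsSystem.instance();
        // The name passed to register() is the one unregisterSource() expects.
        ms.register("MySource", "example source", new MySource());
        ms.unregisterSource("MySource");
      }
    }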
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java

@@ -85,7 +85,7 @@ class MetricsConfig extends SubsetConfiguration {
   private ClassLoader pluginLoader;
 
   MetricsConfig(Configuration c, String prefix) {
-    super(c, prefix, ".");
+    super(c, prefix.toLowerCase(Locale.US), ".");
   }
 
   static MetricsConfig create(String prefix) {

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java

@@ -232,6 +232,17 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
     return source;
   }
 
+  @Override public synchronized
+  void unregisterSource(String name) {
+    if (sources.containsKey(name)) {
+      sources.get(name).stop();
+      sources.remove(name);
+    }
+    if (allSources.containsKey(name)) {
+      allSources.remove(name);
+    }
+  }
+
   synchronized
   void registerSource(String name, String desc, MetricsSource source) {
     checkNotNull(config, "config");

+ 6 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java

@@ -883,8 +883,8 @@ public class NetworkTopology {
    * @param seed Used to seed the pseudo-random generator that randomizes the
    *          set of nodes at each network distance.
    */
-  public void sortByDistance(Node reader, Node[] nodes,
-      int activeLen, long seed) {
+  public void sortByDistance(Node reader, Node[] nodes, int activeLen,
+      long seed, boolean randomizeBlockLocationsPerBlock) {
     /** Sort weights for the nodes array */
     int[] weights = new int[activeLen];
     for (int i=0; i<activeLen; i++) {
@@ -906,8 +906,11 @@
     // Seed is normally the block id
     // This means we use the same pseudo-random order for each block, for
     // potentially better page cache usage.
+    // Seed is not used if we want to randomize block location for every block
     Random rand = getRandom();
-    rand.setSeed(seed);
+    if (!randomizeBlockLocationsPerBlock) {
+      rand.setSeed(seed);
+    }
     int idx = 0;
     for (List<Node> list: tree.values()) {
       if (list != null) {

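With the new signature, callers choose between the seeded, per-block-stable ordering and a fully randomized one. A hypothetical call site, assuming a small topology built with NodeBase:

    import org.apache.hadoop.net.NetworkTopology;
    import org.apache.hadoop.net.Node;
    import org.apache.hadoop.net.NodeBase;

    public class SortByDistanceExample {
      public static void main(String[] args) {
        NetworkTopology topology = new NetworkTopology();
        Node n1 = new NodeBase("h1", "/rack1");
        Node n2 = new NodeBase("h2", "/rack2");
        topology.add(n1);
        topology.add(n2);

        Node[] nodes = { n2, n1 };
        long blockId = 42L;  // normally the block id, used as the seed
        // false: keep the seeded, per-block-stable order (better page cache reuse);
        // true: ignore the seed and randomize locations for every block.
        topology.sortByDistance(n1, nodes, nodes.length, blockId, false);
      }
    }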
+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java

@@ -279,8 +279,8 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
    *          set of nodes at each network distance.
    */
   @Override
-  public void sortByDistance( Node reader, Node[] nodes,
-      int activeLen, long seed) {
+  public void sortByDistance(Node reader, Node[] nodes, int activeLen,
+      long seed, boolean randomizeBlockLocationsPerBlock) {
     // If reader is not a datanode (not in NetworkTopology tree), we need to
     // replace this reader with a sibling leaf node in tree.
     if (reader != null && !this.contains(reader)) {
@@ -293,7 +293,8 @@
         return;
       }
     }
-    super.sortByDistance(reader, nodes, nodes.length, seed);
+    super.sortByDistance(reader, nodes, nodes.length, seed,
+        randomizeBlockLocationsPerBlock);
   }
 
   /** InnerNodeWithNodeGroup represents a switch/router of a data center, rack

+ 46 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java

@@ -33,7 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.Timer;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -58,24 +58,35 @@ public class Groups {
   private final Map<String, List<String>> staticUserToGroupsMap = 
       new HashMap<String, List<String>>();
   private final long cacheTimeout;
+  private final long negativeCacheTimeout;
   private final long warningDeltaMs;
+  private final Timer timer;
 
   public Groups(Configuration conf) {
+    this(conf, new Timer());
+  }
+
+  public Groups(Configuration conf, Timer timer) {
     impl = 
       ReflectionUtils.newInstance(
           conf.getClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, 
                         ShellBasedUnixGroupsMapping.class, 
                         GroupMappingServiceProvider.class), 
          conf);
-    
+
     cacheTimeout = 
       conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 
          CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT) * 1000;
+    negativeCacheTimeout =
+      conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,
+          CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT) * 1000;
     warningDeltaMs =
       conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS,
        CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT);
     parseStaticMapping(conf);
 
+    this.timer = timer;
+
     if(LOG.isDebugEnabled())
       LOG.debug("Group mapping impl=" + impl.getClass().getName() + 
           "; cacheTimeout=" + cacheTimeout + "; warningDeltaMs=" +
@@ -111,7 +122,29 @@ public class Groups {
       staticUserToGroupsMap.put(user, groups);
     }
   }
+
+  /**
+   * Determine whether the CachedGroups is expired.
+   * @param groups cached groups for one user.
+   * @return true if groups is expired from useToGroupsMap.
+   */
+  private boolean hasExpired(CachedGroups groups, long startMs) {
+    if (groups == null) {
+      return true;
+    }
+    long timeout = cacheTimeout;
+    if (isNegativeCacheEnabled() && groups.getGroups().isEmpty()) {
+      // This CachedGroups is in the negative cache, thus it should expire
+      // sooner.
+      timeout = negativeCacheTimeout;
+    }
+    return groups.getTimestamp() + timeout <= startMs;
+  }
   
+  private boolean isNegativeCacheEnabled() {
+    return negativeCacheTimeout > 0;
+  }
+
   /**
    * Get the group memberships of a given user.
    * @param user User's name
@@ -126,18 +159,22 @@ public class Groups {
     }
     // Return cached value if available
     CachedGroups groups = userToGroupsMap.get(user);
-    long startMs = Time.monotonicNow();
-    // if cache has a value and it hasn't expired
-    if (groups != null && (groups.getTimestamp() + cacheTimeout > startMs)) {
+    long startMs = timer.monotonicNow();
+    if (!hasExpired(groups, startMs)) {
       if(LOG.isDebugEnabled()) {
         LOG.debug("Returning cached groups for '" + user + "'");
       }
+      if (groups.getGroups().isEmpty()) {
+        // Even with enabling negative cache, getGroups() has the same behavior
+        // that throws IOException if the groups for the user is empty.
+        throw new IOException("No groups found for user " + user);
+      }
       return groups.getGroups();
     }
 
     // Create and cache user's groups
     List<String> groupList = impl.getGroups(user);
-    long endMs = Time.monotonicNow();
+    long endMs = timer.monotonicNow();
     long deltaMs = endMs - startMs ;
     UserGroupInformation.metrics.addGetGroups(deltaMs);
     if (deltaMs > warningDeltaMs) {
@@ -146,6 +183,9 @@ public class Groups {
     }
     groups = new CachedGroups(groupList, endMs);
     if (groups.getGroups().isEmpty()) {
+      if (isNegativeCacheEnabled()) {
+        userToGroupsMap.put(user, groups);
+      }
       throw new IOException("No groups found for user " + user);
     }
     userToGroupsMap.put(user, groups);

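Net effect of the change above: an empty lookup result is remembered for hadoop.security.groups.negative-cache.secs seconds, and getGroups() keeps throwing IOException for that user until the entry expires. A hedged usage sketch (the user name is made up; the property name is the one added to core-default.xml below):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.Groups;

    public class NegativeCacheExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // 30s is the shipped default; zero or negative disables negative caching.
        conf.setLong("hadoop.security.groups.negative-cache.secs", 30);
        Groups groups = new Groups(conf);
        try {
          groups.getGroups("no-such-user");   // hypothetical user with no groups
        } catch (IOException expected) {
          // "No groups found for user ..." is cached; retries within 30s
          // fail fast without hitting the group mapping provider again.
        }
      }
    }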
+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java

@@ -201,7 +201,8 @@ public class LdapGroupsMapping
     } catch (CommunicationException e) {
       LOG.warn("Connection is closed, will try to reconnect");
     } catch (NamingException e) {
-      LOG.warn("Exception trying to get groups for user " + user, e);
+      LOG.warn("Exception trying to get groups for user " + user + ": "
+          + e.getMessage());
       return emptyResults;
     }
 
@@ -215,7 +216,8 @@ public class LdapGroupsMapping
       } catch (CommunicationException e) {
         LOG.warn("Connection being closed, reconnecting failed, retryCount = " + retryCount);
       } catch (NamingException e) {
-        LOG.warn("Exception trying to get groups for user " + user, e);
+        LOG.warn("Exception trying to get groups for user " + user + ":"
+            + e.getMessage());
         return emptyResults;
       }
     }

+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java

@@ -84,7 +84,8 @@ public class ShellBasedUnixGroupsMapping
       result = Shell.execCommand(Shell.getGroupsForUserCommand(user));
     } catch (ExitCodeException e) {
       // if we didn't get the group - just return empty list;
-      LOG.warn("got exception trying to get groups for user " + user, e);
+      LOG.warn("got exception trying to get groups for user " + user + ": "
+          + e.getMessage());
       return new LinkedList<String>();
     }
     

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java

@@ -373,12 +373,12 @@ public class CredentialShell extends Configured implements Tool {
       char[] newPassword2 = c.readPassword("Enter password again: ");
       noMatch = !Arrays.equals(newPassword1, newPassword2);
       if (noMatch) {
-        Arrays.fill(newPassword1, ' ');
+        if (newPassword1 != null) Arrays.fill(newPassword1, ' ');
         c.format("Passwords don't match. Try again.%n");
       } else {
         cred = newPassword1;
       }
-      Arrays.fill(newPassword2, ' ');
+      if (newPassword2 != null) Arrays.fill(newPassword2, ' ');
     } while (noMatch);
     return cred;
   }

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java

@@ -230,6 +230,7 @@ public class JavaKeyStoreProvider extends CredentialProvider {
 
   CredentialEntry innerSetCredential(String alias, char[] material)
       throws IOException {
+    writeLock.lock();
     try {
       keyStore.setKeyEntry(alias, new SecretKeySpec(
           new String(material).getBytes("UTF-8"), "AES"),
@@ -237,6 +238,8 @@ public class JavaKeyStoreProvider extends CredentialProvider {
     } catch (KeyStoreException e) {
       throw new IOException("Can't store credential " + alias + " in " + this,
           e);
+    } finally {
+      writeLock.unlock();
     }
     changed = true;
     return new CredentialEntry(alias, material);

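The fix above guards the keystore mutation with writeLock.lock()/unlock() and releases the lock in a finally block. The same guard pattern in isolation, as a generic sketch rather than the provider's actual fields:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class WriteLockSketch {
      private final ReadWriteLock lock = new ReentrantReadWriteLock(true);
      private final Map<String, char[]> store = new HashMap<>();

      public void setCredential(String alias, char[] material) {
        lock.writeLock().lock();
        try {
          store.put(alias, material.clone()); // mutation happens under the write lock
        } finally {
          lock.writeLock().unlock();          // always released, even on failure
        }
      }
    }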
+ 46 - 19
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java

@@ -24,37 +24,64 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.regex.Pattern;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.MachineList;
 
 import com.google.common.annotations.VisibleForTesting;
 
+@InterfaceStability.Unstable
+@InterfaceAudience.Public
 public class DefaultImpersonationProvider implements ImpersonationProvider {
   private static final String CONF_HOSTS = ".hosts";
   private static final String CONF_USERS = ".users";
   private static final String CONF_GROUPS = ".groups";
-  private static final String CONF_HADOOP_PROXYUSER = "hadoop.proxyuser.";
-  private static final String CONF_HADOOP_PROXYUSER_RE = "hadoop\\.proxyuser\\.";
-  private static final String CONF_HADOOP_PROXYUSER_RE_USERS_GROUPS = 
-      CONF_HADOOP_PROXYUSER_RE+"[^.]*(" + Pattern.quote(CONF_USERS) +
-      "|" + Pattern.quote(CONF_GROUPS) + ")";
-  private static final String CONF_HADOOP_PROXYUSER_RE_HOSTS = 
-      CONF_HADOOP_PROXYUSER_RE+"[^.]*"+ Pattern.quote(CONF_HOSTS);
   // acl and list of hosts per proxyuser
   private Map<String, AccessControlList> proxyUserAcl = 
     new HashMap<String, AccessControlList>();
-  private static Map<String, MachineList> proxyHosts = 
+  private Map<String, MachineList> proxyHosts =
     new HashMap<String, MachineList>();
   private Configuration conf;
 
+
+  private static DefaultImpersonationProvider testProvider;
+
+  public static synchronized DefaultImpersonationProvider getTestProvider() {
+    if (testProvider == null) {
+      testProvider = new DefaultImpersonationProvider();
+      testProvider.setConf(new Configuration());
+      testProvider.init(ProxyUsers.CONF_HADOOP_PROXYUSER);
+    }
+    return testProvider;
+  }
+
   @Override
   public void setConf(Configuration conf) {
     this.conf = conf;
+  }
+
+  private String configPrefix;
 
-    // get list of users and groups per proxyuser
+  @Override
+  public void init(String configurationPrefix) {
+    configPrefix = configurationPrefix +
+        (configurationPrefix.endsWith(".") ? "" : ".");
+    
+    // constructing regex to match the following patterns:
+    //   $configPrefix.[ANY].users
+    //   $configPrefix.[ANY].groups
+    //   $configPrefix.[ANY].hosts
+    //
+    String prefixRegEx = configPrefix.replace(".", "\\.");
+    String usersGroupsRegEx = prefixRegEx + "[^.]*(" +
+        Pattern.quote(CONF_USERS) + "|" + Pattern.quote(CONF_GROUPS) + ")";
+    String hostsRegEx = prefixRegEx + "[^.]*" + Pattern.quote(CONF_HOSTS);
+
+  // get list of users and groups per proxyuser
     Map<String,String> allMatchKeys = 
-        conf.getValByRegex(CONF_HADOOP_PROXYUSER_RE_USERS_GROUPS); 
+        conf.getValByRegex(usersGroupsRegEx);
     for(Entry<String, String> entry : allMatchKeys.entrySet()) {  
       String aclKey = getAclKey(entry.getKey());
       if (!proxyUserAcl.containsKey(aclKey)) {
@@ -65,7 +92,7 @@ public class DefaultImpersonationProvider implements ImpersonationProvider {
     }
 
     // get hosts per proxyuser
-    allMatchKeys = conf.getValByRegex(CONF_HADOOP_PROXYUSER_RE_HOSTS);
+    allMatchKeys = conf.getValByRegex(hostsRegEx);
     for(Entry<String, String> entry : allMatchKeys.entrySet()) {
       proxyHosts.put(entry.getKey(),
           new MachineList(entry.getValue()));
@@ -86,8 +113,8 @@ public class DefaultImpersonationProvider implements ImpersonationProvider {
       return;
     }
     
-    AccessControlList acl = proxyUserAcl.get(
-        CONF_HADOOP_PROXYUSER+realUser.getShortUserName());
+    AccessControlList acl = proxyUserAcl.get(configPrefix +
+        realUser.getShortUserName());
     if (acl == null || !acl.isUserAllowed(user)) {
       throw new AuthorizationException("User: " + realUser.getUserName()
           + " is not allowed to impersonate " + user.getUserName());
@@ -116,8 +143,8 @@ public class DefaultImpersonationProvider implements ImpersonationProvider {
    * @param userName name of the superuser
    * @return configuration key for superuser usergroups
    */
-  public static String getProxySuperuserUserConfKey(String userName) {
-    return CONF_HADOOP_PROXYUSER+userName+CONF_USERS;
+  public String getProxySuperuserUserConfKey(String userName) {
+    return configPrefix + userName + CONF_USERS;
   }
 
   /**
@@ -126,8 +153,8 @@ public class DefaultImpersonationProvider implements ImpersonationProvider {
    * @param userName name of the superuser
    * @return configuration key for superuser groups
    */
-  public static String getProxySuperuserGroupConfKey(String userName) {
-    return CONF_HADOOP_PROXYUSER+userName+CONF_GROUPS;
+  public String getProxySuperuserGroupConfKey(String userName) {
+    return configPrefix + userName + CONF_GROUPS;
   }
 
   /**
@@ -136,8 +163,8 @@ public class DefaultImpersonationProvider implements ImpersonationProvider {
    * @param userName name of the superuser
    * @return configuration key for superuser ip-addresses
    */
-  public static String getProxySuperuserIpConfKey(String userName) {
-    return CONF_HADOOP_PROXYUSER+userName+CONF_HOSTS;
+  public String getProxySuperuserIpConfKey(String userName) {
+    return configPrefix + userName + CONF_HOSTS;
   }
 
   @VisibleForTesting

+ 15 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ImpersonationProvider.java

@@ -18,10 +18,25 @@
 
 package org.apache.hadoop.security.authorize;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.security.UserGroupInformation;
 
+@InterfaceStability.Unstable
+@InterfaceAudience.Public
 public interface ImpersonationProvider  extends Configurable {
+
+
+  /**
+   * Specifies the configuration prefix for the proxy user properties and
+   * initializes the provider.
+   *
+   * @param configurationPrefix the configuration prefix for the proxy user
+   * properties
+   */
+  public void init(String configurationPrefix);
+
   /**
    * Authorize the superuser which is doing doAs
    * 

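With init(String) added to the interface, a custom provider receives the proxy-user prefix before authorize() is used. A hedged skeleton of such an implementation; the authorize signature is assumed from the released interface, and the permissive body is only a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AuthorizationException;
    import org.apache.hadoop.security.authorize.ImpersonationProvider;

    public class AllowAllImpersonationProvider implements ImpersonationProvider {
      private Configuration conf;
      private String configPrefix;

      @Override
      public void init(String configurationPrefix) {
        // e.g. "hadoop.proxyuser"; a real provider would read
        // <prefix>.<user>.users/.groups/.hosts from the Configuration here.
        this.configPrefix = configurationPrefix;
      }

      @Override
      public void authorize(UserGroupInformation user, String remoteAddress)
          throws AuthorizationException {
        // Placeholder: accept every doAs request.
      }

      @Override
      public void setConf(Configuration conf) { this.conf = conf; }

      @Override
      public Configuration getConf() { return conf; }
    }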
+ 25 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java

@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.security.authorize;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -26,9 +28,12 @@ import org.apache.hadoop.util.ReflectionUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 
+@InterfaceStability.Unstable
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "HBase", "Hive"})
 public class ProxyUsers {
 
+  public static final String CONF_HADOOP_PROXYUSER = "hadoop.proxyuser";
+
   private static volatile ImpersonationProvider sip ;
 
   /**
@@ -54,15 +59,31 @@ public class ProxyUsers {
   }
 
   /**
-   * refresh configuration
-   * @param conf
+   * Refreshes configuration using the specified Proxy user prefix for
+   * properties.
+   *
+   * @param conf configuration
+   * @param proxyUserPrefix proxy user configuration prefix
    */
-  public static void refreshSuperUserGroupsConfiguration(Configuration conf) { 
+  public static void refreshSuperUserGroupsConfiguration(Configuration conf,
+      String proxyUserPrefix) {
+    Preconditions.checkArgument(proxyUserPrefix != null && 
+        !proxyUserPrefix.isEmpty(), "prefix cannot be NULL or empty");
     // sip is volatile. Any assignment to it as well as the object's state
     // will be visible to all the other threads. 
-    sip = getInstance(conf);
+    ImpersonationProvider ip = getInstance(conf);
+    ip.init(proxyUserPrefix);
+    sip = ip;
     ProxyServers.refresh(conf);
   }
+
+  /**
+   * Refreshes configuration using the default Proxy user prefix for properties.
+   * @param conf configuration
+   */
+  public static void refreshSuperUserGroupsConfiguration(Configuration conf) {
+    refreshSuperUserGroupsConfiguration(conf, CONF_HADOOP_PROXYUSER);
+  }
   
   /**
    * Authorize the superuser which is doing doAs

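Given the new overload above, a service that keeps its proxy-user settings under its own prefix can refresh and check them roughly like this (the "myservice.proxyuser" prefix and user names are illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AuthorizationException;
    import org.apache.hadoop.security.authorize.ProxyUsers;

    public class ProxyUsersRefreshExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("myservice.proxyuser.alice.hosts", "*");
        conf.set("myservice.proxyuser.alice.groups", "*");

        // Load the ACLs from the custom prefix instead of "hadoop.proxyuser".
        ProxyUsers.refreshSuperUserGroupsConfiguration(conf, "myservice.proxyuser");

        UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");
        UserGroupInformation bob =
            UserGroupInformation.createProxyUser("bob", alice);
        try {
          ProxyUsers.authorize(bob, "10.0.0.1"); // throws if alice may not impersonate bob
        } catch (AuthorizationException e) {
          System.err.println("impersonation rejected: " + e.getMessage());
        }
      }
    }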
+ 51 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Timer.java

@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Utility methods for getting the time and computing intervals.
+ *
+ * It has the same behavior as {{@link Time}}, with the exception that its
+ * functions can be overridden for dependency injection purposes.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class Timer {
+  /**
+   * Current system time.  Do not use this to calculate a duration or interval
+   * to sleep, because it will be broken by settimeofday.  Instead, use
+   * monotonicNow.
+   * @return current time in msec.
+   */
+  public long now() {
+    return Time.now();
+  }
+
+  /**
+   * Current time from some arbitrary time base in the past, counting in
+   * milliseconds, and not affected by settimeofday or similar system clock
+   * changes.  This is appropriate to use when computing how much longer to
+   * wait for an interval to expire.
+   * @return a monotonic clock that counts in milliseconds.
+   */
+  public long monotonicNow() { return Time.monotonicNow(); }
+}

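Timer exists so that time can be faked in tests; Groups above takes one in its new constructor. A simple controllable subclass as an illustration (Hadoop's own test utility for this may differ):

    import org.apache.hadoop.util.Timer;

    /** A Timer whose clock only moves when the test advances it. */
    public class ManualTimer extends Timer {
      private long nowMs = 0;

      @Override
      public long now() { return nowMs; }

      @Override
      public long monotonicNow() { return nowMs; }

      /** Advance the fake clock, e.g. past a cache timeout. */
      public void advance(long millis) { nowMs += millis; }
    }

A test could construct new Groups(conf, new ManualTimer()) and call advance() to expire cache entries deterministically instead of sleeping.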
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Tool.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configurable;
  * 
  * <p><code>Tool</code>, is the standard for any Map-Reduce tool/application. 
  * The tool/application should delegate the handling of 
- * <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/CommandsManual.html#Generic_Options">
  * standard command-line options</a> to {@link ToolRunner#run(Tool, String[])} 
  * and only handle its custom arguments.</p>
  * 

+ 47 - 0
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -197,6 +197,20 @@ for ldap providers in the same way as above does.
   </description>
 </property>
 
+<property>
+  <name>hadoop.security.groups.negative-cache.secs</name>
+  <value>30</value>
+  <description>
+    Expiration time for entries in the the negative user-to-group mapping
+    caching, in seconds. This is useful when invalid users are retrying
+    frequently. It is suggested to set a small value for this expiration, since
+    a transient error in group lookup could temporarily lock out a legitimate
+    user.
+
+    Set this to zero or negative value to disable negative user-to-group caching.
+  </description>
+</property>
+
 <property>
   <name>hadoop.security.groups.cache.warn.after.ms</name>
   <value>5000</value>
@@ -1455,4 +1469,37 @@ for ldap providers in the same way as above does.
   <value>true</value>
   <description>Don't cache 'har' filesystem instances.</description>
 </property>
+
+<!--- KMSClientProvider configurations -->
+<property>
+  <name>hadoop.security.kms.client.encrypted.key.cache.size</name>
+  <value>500</value>
+  <description>
+    Size of the EncryptedKeyVersion cache Queue for each key
+  </description>
+</property>
+<property>
+  <name>hadoop.security.kms.client.encrypted.key.cache.low-watermark</name>
+  <value>0.3f</value>
+  <description>
+    If size of the EncryptedKeyVersion cache Queue falls below the
+    low watermark, this cache queue will be scheduled for a refill
+  </description>
+</property>
+<property>
+  <name>hadoop.security.kms.client.encrypted.key.cache.num.refill.threads</name>
+  <value>2</value>
+  <description>
+    Number of threads to use for refilling depleted EncryptedKeyVersion
+    cache Queues
+  </description>
+</property>
+<property>
+  <name>"hadoop.security.kms.client.encrypted.key.cache.expiry</name>
+  <value>43200000</value>
+  <description>
+    Cache expiry time for a Key, after which the cache Queue for this
+    key will be dropped. Default = 12hrs
+  </description>
+</property>
 </configuration>

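The KMSClientProvider cache properties above can also be tuned programmatically. A hedged sketch using the property names as listed (the chosen values are arbitrary examples, not recommendations):

    import org.apache.hadoop.conf.Configuration;

    public class KmsClientCacheTuning {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Larger per-key EncryptedKeyVersion cache queue, refilled earlier
        // and by more threads than the defaults shown above.
        conf.setInt("hadoop.security.kms.client.encrypted.key.cache.size", 1000);
        conf.setFloat("hadoop.security.kms.client.encrypted.key.cache.low-watermark", 0.5f);
        conf.setInt("hadoop.security.kms.client.encrypted.key.cache.num.refill.threads", 4);
        System.out.println(conf.getInt(
            "hadoop.security.kms.client.encrypted.key.cache.size", 500));
      }
    }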
+ 3 - 1
hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm

@@ -127,7 +127,7 @@ User Commands
    Runs a HDFS filesystem checking utility.
    See {{{../hadoop-hdfs/HdfsUserGuide.html#fsck}fsck}} for more info.
 
-   Usage: <<<hadoop fsck [GENERIC_OPTIONS] <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]]>>>
+   Usage: <<<hadoop fsck [GENERIC_OPTIONS] <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]] [-showprogress]>>>
 
 *------------------+---------------------------------------------+
 ||  COMMAND_OPTION || Description
@@ -148,6 +148,8 @@ User Commands
 *------------------+---------------------------------------------+
 |   -racks         | Print out network topology for data-node locations.
 *------------------+---------------------------------------------+
+|   -showprogress  | Print out show progress in output. Default is OFF (no progress).
+*------------------+---------------------------------------------+
 
 * <<<fetchdt>>>
 

+ 2 - 0
hadoop-common-project/hadoop-common/src/site/apt/NativeLibraries.apt.vm

@@ -116,6 +116,8 @@ Native Libraries Guide
 
      * zlib-development package (stable version >= 1.2.0)
 
+     * openssl-development package(e.g. libssl-dev)
+
    Once you installed the prerequisite packages use the standard hadoop
    pom.xml file and pass along the native flag to build the native hadoop 
    library:

+ 53 - 21
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSCacheKeyProvider.java → hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestCachingKeyProvider.java

@@ -15,17 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.crypto.key.kms.server;
+package org.apache.hadoop.crypto.key;
+
+import java.util.Date;
 
-import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 
-import java.util.Date;
-
-public class TestKMSCacheKeyProvider {
+public class TestCachingKeyProvider {
 
   @Test
   public void testCurrentKey() throws Exception {
@@ -33,7 +32,7 @@ public class TestKMSCacheKeyProvider {
     KeyProvider mockProv = Mockito.mock(KeyProvider.class);
     Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
     Mockito.when(mockProv.getCurrentKey(Mockito.eq("k2"))).thenReturn(null);
-    KeyProvider cache = new KMSCacheKeyProvider(mockProv, 100);
+    KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
 
     // asserting caching
     Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
@@ -45,7 +44,7 @@ public class TestKMSCacheKeyProvider {
     Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
 
     // asserting no caching when key is not known
-    cache = new KMSCacheKeyProvider(mockProv, 100);
+    cache = new CachingKeyProvider(mockProv, 100, 100);
     Assert.assertEquals(null, cache.getCurrentKey("k2"));
     Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k2"));
     Assert.assertEquals(null, cache.getCurrentKey("k2"));
@@ -56,25 +55,56 @@ public class TestKMSCacheKeyProvider {
   public void testKeyVersion() throws Exception {
     KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
     KeyProvider mockProv = Mockito.mock(KeyProvider.class);
-    Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0"))).thenReturn(mockKey);
+    Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0")))
+        .thenReturn(mockKey);
     Mockito.when(mockProv.getKeyVersion(Mockito.eq("k2@0"))).thenReturn(null);
-    KeyProvider cache = new KMSCacheKeyProvider(mockProv, 100);
+    KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
 
     // asserting caching
     Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
-    Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));
+    Mockito.verify(mockProv, Mockito.times(1))
+        .getKeyVersion(Mockito.eq("k1@0"));
     Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
-    Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));
+    Mockito.verify(mockProv, Mockito.times(1))
+        .getKeyVersion(Mockito.eq("k1@0"));
     Thread.sleep(200);
     Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
-    Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k1@0"));
+    Mockito.verify(mockProv, Mockito.times(2))
+        .getKeyVersion(Mockito.eq("k1@0"));
 
     // asserting no caching when key is not known
-    cache = new KMSCacheKeyProvider(mockProv, 100);
+    cache = new CachingKeyProvider(mockProv, 100, 100);
     Assert.assertEquals(null, cache.getKeyVersion("k2@0"));
-    Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k2@0"));
+    Mockito.verify(mockProv, Mockito.times(1))
+        .getKeyVersion(Mockito.eq("k2@0"));
     Assert.assertEquals(null, cache.getKeyVersion("k2@0"));
-    Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k2@0"));
+    Mockito.verify(mockProv, Mockito.times(2))
+        .getKeyVersion(Mockito.eq("k2@0"));
+  }
+
+  @Test
+  public void testMetadata() throws Exception {
+    KeyProvider.Metadata mockMeta = Mockito.mock(KeyProvider.Metadata.class);
+    KeyProvider mockProv = Mockito.mock(KeyProvider.class);
+    Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(mockMeta);
+    Mockito.when(mockProv.getMetadata(Mockito.eq("k2"))).thenReturn(null);
+    KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
+
+    // asserting caching
+    Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
+    Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k1"));
+    Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
+    Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k1"));
+    Thread.sleep(200);
+    Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
+    Mockito.verify(mockProv, Mockito.times(2)).getMetadata(Mockito.eq("k1"));
+
+    // asserting no caching when key is not known
+    cache = new CachingKeyProvider(mockProv, 100, 100);
+    Assert.assertEquals(null, cache.getMetadata("k2"));
+    Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k2"));
+    Assert.assertEquals(null, cache.getMetadata("k2"));
+    Mockito.verify(mockProv, Mockito.times(2)).getMetadata(Mockito.eq("k2"));
   }
 
   @Test
@@ -82,7 +112,7 @@ public class TestKMSCacheKeyProvider {
     KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
     KeyProvider mockProv = Mockito.mock(KeyProvider.class);
     Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
-    KeyProvider cache = new KMSCacheKeyProvider(mockProv, 100);
+    KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
     Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
     Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
     cache.rollNewVersion("k1");
@@ -100,21 +130,23 @@ public class TestKMSCacheKeyProvider {
     KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
     KeyProvider mockProv = Mockito.mock(KeyProvider.class);
     Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
-    Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0"))).thenReturn(mockKey);
+    Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0")))
+        .thenReturn(mockKey);
     Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(
         new KMSClientProvider.KMSMetadata("c", 0, "l", null, new Date(), 1));
-    KeyProvider cache = new KMSCacheKeyProvider(mockProv, 100);
+    KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
     Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
     Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
     Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
-    Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));
+    Mockito.verify(mockProv, Mockito.times(1))
+        .getKeyVersion(Mockito.eq("k1@0"));
     cache.deleteKey("k1");
 
     // asserting the cache is purged
     Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
     Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
     Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
-    Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k1@0"));
+    Mockito.verify(mockProv, Mockito.times(2))
+        .getKeyVersion(Mockito.eq("k1@0"));
   }
-
 }

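Based on the constructor used throughout the test above, wrapping a provider looks roughly like this (timeouts in milliseconds; the backing provider here is just a Mockito mock, and the snippet sits in the same package as the test to keep visibility assumptions minimal):

    package org.apache.hadoop.crypto.key;

    import org.mockito.Mockito;

    public class CachingKeyProviderSketch {
      public static void main(String[] args) throws Exception {
        KeyProvider backing = Mockito.mock(KeyProvider.class);
        // 30s cache for current-key/metadata lookups and for key-version lookups
        // (argument order assumed from the tests above).
        KeyProvider cached = new CachingKeyProvider(backing, 30 * 1000, 30 * 1000);
        cached.getCurrentKey("mykey");   // first call hits the backing provider
        cached.getCurrentKey("mykey");   // served from the cache until it expires
      }
    }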
+ 93 - 31
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java

@@ -17,50 +17,112 @@
  */
 package org.apache.hadoop.crypto.key;
 
+import java.net.URI;
+import java.security.SecureRandom;
+import java.util.Arrays;
+
+import javax.crypto.Cipher;
+import javax.crypto.spec.IvParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
+
 import org.apache.hadoop.conf.Configuration;
-import org.junit.Assert;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.net.URI;
-import java.security.SecureRandom;
+
+import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
 
 public class TestKeyProviderCryptoExtension {
 
   private static final String CIPHER = "AES";
+  private static final String ENCRYPTION_KEY_NAME = "fooKey";
 
-  @Test
-  public void testGenerateEncryptedKey() throws Exception {
-    Configuration conf = new Configuration();    
-    KeyProvider kp = 
-        new UserProvider.Factory().createProvider(new URI("user:///"), conf);
-    KeyProvider.Options options = new KeyProvider.Options(conf);
+  private static Configuration conf;
+  private static KeyProvider kp;
+  private static KeyProviderCryptoExtension kpExt;
+  private static KeyProvider.Options options;
+  private static KeyVersion encryptionKey;
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    conf = new Configuration();
+    kp = new UserProvider.Factory().createProvider(new URI("user:///"), conf);
+    kpExt = KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
+    options = new KeyProvider.Options(conf);
     options.setCipher(CIPHER);
     options.setBitLength(128);
-    KeyProvider.KeyVersion kv = kp.createKey("foo", SecureRandom.getSeed(16),
-        options);
-    KeyProviderCryptoExtension kpExt = 
-        KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
-    
+    encryptionKey =
+        kp.createKey(ENCRYPTION_KEY_NAME, SecureRandom.getSeed(16), options);
+  }
+
+  @Test
+  public void testGenerateEncryptedKey() throws Exception {
+    // Generate a new EEK and check it
     KeyProviderCryptoExtension.EncryptedKeyVersion ek1 = 
-        kpExt.generateEncryptedKey(kv);
-    Assert.assertEquals(KeyProviderCryptoExtension.EEK, 
-        ek1.getEncryptedKey().getVersionName());
-    Assert.assertNotNull(ek1.getEncryptedKey().getMaterial());
-    Assert.assertEquals(kv.getMaterial().length, 
-        ek1.getEncryptedKey().getMaterial().length);
-    KeyProvider.KeyVersion k1 = kpExt.decryptEncryptedKey(ek1);
-    Assert.assertEquals(KeyProviderCryptoExtension.EK, k1.getVersionName());
-    KeyProvider.KeyVersion k1a = kpExt.decryptEncryptedKey(ek1);
-    Assert.assertArrayEquals(k1.getMaterial(), k1a.getMaterial());
-    Assert.assertEquals(kv.getMaterial().length, k1.getMaterial().length);
+        kpExt.generateEncryptedKey(encryptionKey.getName());
+    assertEquals("Version name of EEK should be EEK",
+        KeyProviderCryptoExtension.EEK,
+        ek1.getEncryptedKeyVersion().getVersionName());
+    assertEquals("Name of EEK should be encryption key name",
+        ENCRYPTION_KEY_NAME, ek1.getEncryptionKeyName());
+    assertNotNull("Expected encrypted key material",
+        ek1.getEncryptedKeyVersion().getMaterial());
+    assertEquals("Length of encryption key material and EEK material should "
+            + "be the same", encryptionKey.getMaterial().length,
+        ek1.getEncryptedKeyVersion().getMaterial().length
+    );
+
+    // Decrypt EEK into an EK and check it
+    KeyVersion k1 = kpExt.decryptEncryptedKey(ek1);
+    assertEquals(KeyProviderCryptoExtension.EK, k1.getVersionName());
+    assertEquals(encryptionKey.getMaterial().length, k1.getMaterial().length);
+    if (Arrays.equals(k1.getMaterial(), encryptionKey.getMaterial())) {
+      fail("Encrypted key material should not equal encryption key material");
+    }
+    if (Arrays.equals(ek1.getEncryptedKeyVersion().getMaterial(),
+        encryptionKey.getMaterial())) {
+      fail("Encrypted key material should not equal decrypted key material");
+    }
+    // Decrypt it again and it should be the same
+    KeyVersion k1a = kpExt.decryptEncryptedKey(ek1);
+    assertArrayEquals(k1.getMaterial(), k1a.getMaterial());
 
+    // Generate another EEK and make sure it's different from the first
     KeyProviderCryptoExtension.EncryptedKeyVersion ek2 = 
-        kpExt.generateEncryptedKey(kv);
-    KeyProvider.KeyVersion k2 = kpExt.decryptEncryptedKey(ek2);
-    boolean eq = true;
-    for (int i = 0; eq && i < ek2.getEncryptedKey().getMaterial().length; i++) {
-      eq = k2.getMaterial()[i] == k1.getMaterial()[i];
+        kpExt.generateEncryptedKey(encryptionKey.getName());
+    KeyVersion k2 = kpExt.decryptEncryptedKey(ek2);
+    if (Arrays.equals(k1.getMaterial(), k2.getMaterial())) {
+      fail("Generated EEKs should have different material!");
     }
-    Assert.assertFalse(eq);
+    if (Arrays.equals(ek1.getEncryptedKeyIv(), ek2.getEncryptedKeyIv())) {
+      fail("Generated EEKs should have different IVs!");
+    }
+  }
+
+  @Test
+  public void testEncryptDecrypt() throws Exception {
+    // Get an EEK
+    KeyProviderCryptoExtension.EncryptedKeyVersion eek =
+        kpExt.generateEncryptedKey(encryptionKey.getName());
+    final byte[] encryptedKeyIv = eek.getEncryptedKeyIv();
+    final byte[] encryptedKeyMaterial = eek.getEncryptedKeyVersion()
+        .getMaterial();
+    // Decrypt it manually
+    Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
+    cipher.init(Cipher.DECRYPT_MODE,
+        new SecretKeySpec(encryptionKey.getMaterial(), "AES"),
+        new IvParameterSpec(KeyProviderCryptoExtension.EncryptedKeyVersion
+            .deriveIV(encryptedKeyIv)));
+    final byte[] manualMaterial = cipher.doFinal(encryptedKeyMaterial);
+    // Decrypt it with the API
+    KeyVersion decryptedKey = kpExt.decryptEncryptedKey(eek);
+    final byte[] apiMaterial = decryptedKey.getMaterial();
+
+    assertArrayEquals("Wrong key material from decryptEncryptedKey",
+        manualMaterial, apiMaterial);
   }
 }

+ 8 - 8
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java

@@ -161,7 +161,7 @@ public class TestKeyShell {
     KeyShell ks = new KeyShell();
     ks.setConf(new Configuration());
     rc = ks.run(args1);
-    assertEquals(-1, rc);
+    assertEquals(1, rc);
     assertTrue(outContent.toString().contains("key1 has not been created."));
   }
 
@@ -174,7 +174,7 @@
     KeyShell ks = new KeyShell();
     ks.setConf(new Configuration());
     rc = ks.run(args1);
-    assertEquals(-1, rc);
+    assertEquals(1, rc);
     assertTrue(outContent.toString().contains("key1 has not been created."));
   }
 
@@ -187,7 +187,7 @@
     KeyShell ks = new KeyShell();
     ks.setConf(new Configuration());
     rc = ks.run(args1);
-    assertEquals(-1, rc);
+    assertEquals(1, rc);
     assertTrue(outContent.toString().contains("There are no valid " +
     		"KeyProviders configured."));
   }
@@ -216,7 +216,7 @@
     config.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///");
     ks.setConf(config);
     rc = ks.run(args1);
-    assertEquals(-1, rc);
+    assertEquals(1, rc);
     assertTrue(outContent.toString().contains("There are no valid " +
     		"KeyProviders configured."));
   }
@@ -262,19 +262,19 @@
     final String[] args2 = {"create", "keyattr2", "--provider", jceksProvider,
             "--attr", "=bar"};
     rc = ks.run(args2);
-    assertEquals(-1, rc);
+    assertEquals(1, rc);
 
     /* Not in attribute = value form */
     outContent.reset();
     args2[5] = "foo";
     rc = ks.run(args2);
-    assertEquals(-1, rc);
+    assertEquals(1, rc);
 
     /* No attribute or value */
     outContent.reset();
     args2[5] = "=";
     rc = ks.run(args2);
-    assertEquals(-1, rc);
+    assertEquals(1, rc);
 
     /* Legal: attribute is a, value is b=c */
     outContent.reset();
@@ -308,7 +308,7 @@
             "--attr", "foo=bar",
             "--attr", "foo=glarch"};
     rc = ks.run(args4);
-    assertEquals(-1, rc);
+    assertEquals(1, rc);
 
     /* Clean up to be a good citizen */
     deleteKey(ks, "keyattr1");

+ 190 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java

@@ -0,0 +1,190 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key;
+
+import java.io.IOException;
+import java.util.Queue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.crypto.key.kms.ValueQueue;
+import org.apache.hadoop.crypto.key.kms.ValueQueue.QueueRefiller;
+import org.apache.hadoop.crypto.key.kms.ValueQueue.SyncGenerationPolicy;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Sets;
+
+public class TestValueQueue {
+
+  private static class FillInfo {
+    final int num;
+    final String key;
+    FillInfo(int num, String key) {
+      this.num = num;
+      this.key = key;
+    }
+  }
+
+  private static class MockFiller implements QueueRefiller<String> {
+    final LinkedBlockingQueue<FillInfo> fillCalls =
+        new LinkedBlockingQueue<FillInfo>();
+    @Override
+    public void fillQueueForKey(String keyName, Queue<String> keyQueue,
+        int numValues) throws IOException {
+      fillCalls.add(new FillInfo(numValues, keyName));
+      for(int i = 0; i < numValues; i++) {
+        keyQueue.add("test");
+      }
+    }
+    public FillInfo getTop() throws InterruptedException {
+      return fillCalls.poll(500, TimeUnit.MILLISECONDS);
+    }
+  }
+
+  /**
+   * Verifies that Queue is initially filled to "numInitValues"
+   */
+  @Test
+  public void testInitFill() throws Exception {
+    MockFiller filler = new MockFiller();
+    ValueQueue<String> vq =
+        new ValueQueue<String>(10, 0.1f, 300, 1,
+            SyncGenerationPolicy.ALL, filler);
+    Assert.assertEquals("test", vq.getNext("k1"));
+    Assert.assertEquals(1, filler.getTop().num);
+    vq.shutdown();
+  }
+
+  /**
+   * Verifies that Queue is initialized (Warmed-up) for provided keys
+   */
+  @Test
+  public void testWarmUp() throws Exception {
+    MockFiller filler = new MockFiller();
+    ValueQueue<String> vq =
+        new ValueQueue<String>(10, 0.5f, 300, 1,
+            SyncGenerationPolicy.ALL, filler);
+    vq.initializeQueuesForKeys("k1", "k2", "k3");
+    FillInfo[] fillInfos =
+      {filler.getTop(), filler.getTop(), filler.getTop()};
+    Assert.assertEquals(5, fillInfos[0].num);
+    Assert.assertEquals(5, fillInfos[1].num);
+    Assert.assertEquals(5, fillInfos[2].num);
+    Assert.assertEquals(Sets.newHashSet("k1", "k2", "k3"),
+        Sets.newHashSet(fillInfos[0].key,
+            fillInfos[1].key,
+            fillInfos[2].key));
+    vq.shutdown();
+  }
+
+  /**
+   * Verifies that the refill task is executed after "checkInterval" if the
+   * number of values is below "lowWatermark"
+   */
+  @Test
+  public void testRefill() throws Exception {
+    MockFiller filler = new MockFiller();
+    ValueQueue<String> vq =
+        new ValueQueue<String>(10, 0.1f, 300, 1,
+            SyncGenerationPolicy.ALL, filler);
+    Assert.assertEquals("test", vq.getNext("k1"));
+    Assert.assertEquals(1, filler.getTop().num);
+    // Trigger refill
+    vq.getNext("k1");
+    Assert.assertEquals(1, filler.getTop().num);
+    Assert.assertEquals(10, filler.getTop().num);
+    vq.shutdown();
+  }
+
+  /**
+   * Verifies that no refill happens after "checkInterval" if the
+   * number of values is above "lowWatermark"
+   */
+  @Test
+  public void testNoRefill() throws Exception {
+    MockFiller filler = new MockFiller();
+    ValueQueue<String> vq =
+        new ValueQueue<String>(10, 0.5f, 300, 1,
+            SyncGenerationPolicy.ALL, filler);
+    Assert.assertEquals("test", vq.getNext("k1"));
+    Assert.assertEquals(5, filler.getTop().num);
+    Assert.assertEquals(null, filler.getTop());
+    vq.shutdown();
+  }
+
+  /**
+   * Verify getAtMost when SyncGeneration Policy = ALL
+   */
+  @Test
+  public void testgetAtMostPolicyALL() throws Exception {
+    MockFiller filler = new MockFiller();
+    ValueQueue<String> vq =
+        new ValueQueue<String>(10, 0.1f, 300, 1,
+            SyncGenerationPolicy.ALL, filler);
+    Assert.assertEquals("test", vq.getNext("k1"));
+    Assert.assertEquals(1, filler.getTop().num);
+    // Drain completely
+    Assert.assertEquals(10, vq.getAtMost("k1", 10).size());
+    // Synchronous call
+    Assert.assertEquals(10, filler.getTop().num);
+    // Ask for more... return all
+    Assert.assertEquals(19, vq.getAtMost("k1", 19).size());
+    // Synchronous call (No Async call since num > lowWatermark)
+    Assert.assertEquals(19, filler.getTop().num);
+    vq.shutdown();
+  }
+
+  /**
+   * Verify getAtMost when SyncGeneration Policy = ATLEAST_ONE
+   */
+  @Test
+  public void testgetAtMostPolicyATLEAST_ONE() throws Exception {
+    MockFiller filler = new MockFiller();
+    ValueQueue<String> vq =
+        new ValueQueue<String>(10, 0.3f, 300, 1,
+            SyncGenerationPolicy.ATLEAST_ONE, filler);
+    Assert.assertEquals("test", vq.getNext("k1"));
+    Assert.assertEquals(3, filler.getTop().num);
+    // Drain completely
+    Assert.assertEquals(2, vq.getAtMost("k1", 10).size());
+    // Asynch Refill call
+    Assert.assertEquals(10, filler.getTop().num);
+    vq.shutdown();
+  }
+
+  /**
+   * Verify getAtMost when SyncGeneration Policy = LOW_WATERMARK
+   */
+  @Test
+  public void testgetAtMostPolicyLOW_WATERMARK() throws Exception {
+    MockFiller filler = new MockFiller();
+    ValueQueue<String> vq =
+        new ValueQueue<String>(10, 0.3f, 300, 1,
+            SyncGenerationPolicy.LOW_WATERMARK, filler);
+    Assert.assertEquals("test", vq.getNext("k1"));
+    Assert.assertEquals(3, filler.getTop().num);
+    // Drain completely
+    Assert.assertEquals(3, vq.getAtMost("k1", 10).size());
+    // Synchronous call
+    Assert.assertEquals(1, filler.getTop().num);
+    // Asynch Refill call
+    Assert.assertEquals(10, filler.getTop().num);
+    vq.shutdown();
+  }
+}
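
The TestValueQueue suite above is the only place this change shows the new ValueQueue API in action, so a minimal usage sketch may help when reading the assertions. The constructor-argument meanings (per-key queue size, low-watermark fraction, expiry, number of filler threads) are inferred from these tests and should be treated as assumptions, not documented behaviour:

// Minimal sketch, assuming the constructor arguments mean what the tests imply.
import java.io.IOException;
import java.util.Queue;

import org.apache.hadoop.crypto.key.kms.ValueQueue;
import org.apache.hadoop.crypto.key.kms.ValueQueue.QueueRefiller;
import org.apache.hadoop.crypto.key.kms.ValueQueue.SyncGenerationPolicy;

public class ValueQueueUsageSketch {
  public static void main(String[] args) throws Exception {
    // A refiller that synthesises placeholder values for whatever key is asked for.
    QueueRefiller<String> filler = new QueueRefiller<String>() {
      @Override
      public void fillQueueForKey(String keyName, Queue<String> keyQueue,
          int numValues) throws IOException {
        for (int i = 0; i < numValues; i++) {
          keyQueue.add(keyName + "-value-" + i);
        }
      }
    };
    ValueQueue<String> vq = new ValueQueue<String>(
        10,    // per-key queue size (assumed)
        0.3f,  // low-watermark fraction that triggers an async refill (assumed)
        300,   // cache expiry (assumed to be milliseconds)
        1,     // async filler threads (assumed)
        SyncGenerationPolicy.LOW_WATERMARK, filler);
    vq.initializeQueuesForKeys("k1");           // warm up the queue for "k1"
    String one = vq.getNext("k1");              // take one cached value
    int got = vq.getAtMost("k1", 5).size();     // take up to five values
    System.out.println(one + ", got " + got);
    vq.shutdown();                              // stop the refill executor
  }
}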

+ 19 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java

@@ -45,15 +45,15 @@ public class TestStat extends FileSystemTestHelper {
     final String doesNotExist;
     final String directory;
     final String file;
-    final String symlink;
+    final String[] symlinks;
     final String stickydir;
 
     StatOutput(String doesNotExist, String directory, String file,
-        String symlink, String stickydir) {
+        String[] symlinks, String stickydir) {
       this.doesNotExist = doesNotExist;
       this.directory = directory;
       this.file = file;
-      this.symlink = symlink;
+      this.symlinks = symlinks;
       this.stickydir = stickydir;
     }
 
 
@@ -78,10 +78,12 @@ public class TestStat extends FileSystemTestHelper {
       status = stat.getFileStatusForTesting();
       assertTrue(status.isFile());
 
 
-      br = new BufferedReader(new StringReader(symlink));
-      stat.parseExecResult(br);
-      status = stat.getFileStatusForTesting();
-      assertTrue(status.isSymlink());
+      for (String symlink : symlinks) {
+        br = new BufferedReader(new StringReader(symlink));
+        stat.parseExecResult(br);
+        status = stat.getFileStatusForTesting();
+        assertTrue(status.isSymlink());
+      }
 
       br = new BufferedReader(new StringReader(stickydir));
       stat.parseExecResult(br);
@@ -93,22 +95,30 @@ public class TestStat extends FileSystemTestHelper {
 
 
   @Test(timeout=10000)
   public void testStatLinux() throws Exception {
+    String[] symlinks = new String[] {
+        "6,symbolic link,1373584236,1373584236,777,andrew,andrew,`link' -> `target'",
+        "6,symbolic link,1373584236,1373584236,777,andrew,andrew,'link' -> 'target'"
+    };
     StatOutput linux = new StatOutput(
         "stat: cannot stat `watermelon': No such file or directory",
         "4096,directory,1373584236,1373586485,755,andrew,root,`.'",
         "0,regular empty file,1373584228,1373584228,644,andrew,andrew,`target'",
-        "6,symbolic link,1373584236,1373584236,777,andrew,andrew,`link' -> `target'",
+        symlinks,
         "4096,directory,1374622334,1375124212,1755,andrew,andrew,`stickydir'");
     linux.test();
   }
 
   @Test(timeout=10000)
   public void testStatFreeBSD() throws Exception {
+    String[] symlinks = new String[] {
+        "6,Symbolic Link,1373508941,1373508941,120755,awang,awang,`link' -> `target'"
+    };
+    
     StatOutput freebsd = new StatOutput(
         "stat: symtest/link: stat: No such file or directory",
         "512,Directory,1373583695,1373583669,40755,awang,awang,`link' -> `'",
         "0,Regular File,1373508937,1373508937,100644,awang,awang,`link' -> `'",
-        "6,Symbolic Link,1373508941,1373508941,120755,awang,awang,`link' -> `target'",
+        symlinks,
         "512,Directory,1375139537,1375139537,41755,awang,awang,`link' -> `'");
     freebsd.test();
   }

+ 54 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.viewfs;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.ArrayList;
 import java.util.List;
 
 
 
 
@@ -28,9 +29,16 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import static org.apache.hadoop.fs.FileSystemTestHelper.*;
+import org.apache.hadoop.fs.permission.AclEntry;
+import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.viewfs.ConfigUtil;
@@ -38,6 +46,7 @@ import org.apache.hadoop.fs.viewfs.ViewFileSystem;
 import org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.junit.After;
 import org.junit.Assert;
@@ -96,7 +105,6 @@ public class ViewFileSystemBaseTest {
     // in the test root
     
     // Set up the defaultMT in the config with our mount point links
-    //Configuration conf = new Configuration();
     conf = ViewFileSystemTestSetup.createConfig();
     setupMountPoints();
     fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
@@ -720,4 +728,49 @@ public class ViewFileSystemBaseTest {
     Assert.assertTrue("Other-readable permission not set!",
         perms.getOtherAction().implies(FsAction.READ));
   }
+
+  /**
+   * Verify the behavior of ACL operations on paths above the root of
+   * any mount table entry.
+   */
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalModifyAclEntries() throws IOException {
+    fsView.modifyAclEntries(new Path("/internalDir"),
+        new ArrayList<AclEntry>());
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalRemoveAclEntries() throws IOException {
+    fsView.removeAclEntries(new Path("/internalDir"),
+        new ArrayList<AclEntry>());
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalRemoveDefaultAcl() throws IOException {
+    fsView.removeDefaultAcl(new Path("/internalDir"));
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalRemoveAcl() throws IOException {
+    fsView.removeAcl(new Path("/internalDir"));
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalSetAcl() throws IOException {
+    fsView.setAcl(new Path("/internalDir"), new ArrayList<AclEntry>());
+  }
+
+  @Test
+  public void testInternalGetAclStatus() throws IOException {
+    final UserGroupInformation currentUser =
+        UserGroupInformation.getCurrentUser();
+    AclStatus aclStatus = fsView.getAclStatus(new Path("/internalDir"));
+    assertEquals(aclStatus.getOwner(), currentUser.getUserName());
+    assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
+    assertEquals(aclStatus.getEntries(),
+        AclUtil.getMinimalAcl(PERMISSION_555));
+    assertFalse(aclStatus.isStickyBit());
+  }
+
 }

+ 52 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java

@@ -22,10 +22,14 @@ import static org.apache.hadoop.fs.FileContextTestHelper.checkFileStatus;
 import static org.apache.hadoop.fs.FileContextTestHelper.exists;
 import static org.apache.hadoop.fs.FileContextTestHelper.isDir;
 import static org.apache.hadoop.fs.FileContextTestHelper.isFile;
+import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -39,8 +43,12 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.viewfs.ViewFs.MountPoint;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.junit.After;
 import org.junit.Assert;
@@ -695,4 +703,48 @@ public class ViewFsBaseTest {
   public void testInternalSetOwner() throws IOException {
     fcView.setOwner(new Path("/internalDir"), "foo", "bar");
   }
+
+  /**
+   * Verify the behavior of ACL operations on paths above the root of
+   * any mount table entry.
+   */
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalModifyAclEntries() throws IOException {
+    fcView.modifyAclEntries(new Path("/internalDir"),
+        new ArrayList<AclEntry>());
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalRemoveAclEntries() throws IOException {
+    fcView.removeAclEntries(new Path("/internalDir"),
+        new ArrayList<AclEntry>());
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalRemoveDefaultAcl() throws IOException {
+    fcView.removeDefaultAcl(new Path("/internalDir"));
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalRemoveAcl() throws IOException {
+    fcView.removeAcl(new Path("/internalDir"));
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalSetAcl() throws IOException {
+    fcView.setAcl(new Path("/internalDir"), new ArrayList<AclEntry>());
+  }
+
+  @Test
+  public void testInternalGetAclStatus() throws IOException {
+    final UserGroupInformation currentUser =
+        UserGroupInformation.getCurrentUser();
+    AclStatus aclStatus = fcView.getAclStatus(new Path("/internalDir"));
+    assertEquals(aclStatus.getOwner(), currentUser.getUserName());
+    assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
+    assertEquals(aclStatus.getEntries(),
+        AclUtil.getMinimalAcl(PERMISSION_555));
+    assertFalse(aclStatus.isStickyBit());
+  }
 }

+ 22 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java

@@ -24,6 +24,7 @@ import java.nio.BufferUnderflowException;
 import java.nio.ByteBuffer;
 import java.nio.charset.CharacterCodingException;
 import java.util.Random;
+import com.google.common.base.Charsets;
 import com.google.common.primitives.Bytes;
 
 /** Unit tests for LargeUTF8. */
@@ -363,6 +364,27 @@ public class TestText extends TestCase {
       fail("testReadWriteOperations error !!!");
     }        
   }
+
+  public void testReadWithKnownLength() throws IOException {
+    String line = "hello world";
+    byte[] inputBytes = line.getBytes(Charsets.UTF_8);
+    DataInputBuffer in = new DataInputBuffer();
+    Text text = new Text();
+
+    in.reset(inputBytes, inputBytes.length);
+    text.readWithKnownLength(in, 5);
+    assertEquals("hello", text.toString());
+
+    // Read longer length, make sure it lengthens
+    in.reset(inputBytes, inputBytes.length);
+    text.readWithKnownLength(in, 7);
+    assertEquals("hello w", text.toString());
+
+    // Read shorter length, make sure it shortens
+    in.reset(inputBytes, inputBytes.length);
+    text.readWithKnownLength(in, 2);
+    assertEquals("he", text.toString());
+  }
   
   
   /**
    * test {@code Text.bytesToCodePoint(bytes) } 
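
The new testReadWithKnownLength case above is the clearest description of Text.readWithKnownLength in this change; a stand-alone sketch of the same pattern, built only from calls that appear in the test, follows. Treat the remark about buffer resizing as drawn from the test's own "lengthens"/"shortens" comments rather than from the class documentation.

// Sketch of the pattern exercised by testReadWithKnownLength: the Text is
// overwritten with exactly `len` bytes from the stream, growing or shrinking
// its contents as the test's comments describe.
import com.google.common.base.Charsets;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Text;

public class ReadWithKnownLengthSketch {
  public static void main(String[] args) throws Exception {
    byte[] bytes = "hello world".getBytes(Charsets.UTF_8);
    DataInputBuffer in = new DataInputBuffer();
    Text text = new Text();

    in.reset(bytes, bytes.length);
    text.readWithKnownLength(in, 5);   // text is now "hello"
    in.reset(bytes, bytes.length);
    text.readWithKnownLength(in, 2);   // text shrinks to "he"
    System.out.println(text);
  }
}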

+ 4 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java

@@ -327,8 +327,8 @@ public class MiniRPCBenchmark {
     String shortUserName =
       UserGroupInformation.createRemoteUser(user).getShortUserName();
     try {
-      conf.setStrings(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(shortUserName),
-          GROUP_NAME_1);
+      conf.setStrings(DefaultImpersonationProvider.getTestProvider().
+              getProxySuperuserGroupConfKey(shortUserName), GROUP_NAME_1);
       configureSuperUserIPAddresses(conf, shortUserName);
       // start the server
       miniServer = new MiniServer(conf, user, keytabFile);
@@ -411,7 +411,7 @@ public class MiniRPCBenchmark {
     }
     builder.append("127.0.1.1,");
     builder.append(InetAddress.getLocalHost().getCanonicalHostName());
-    conf.setStrings(DefaultImpersonationProvider.getProxySuperuserIpConfKey(superUserShortName),
-        builder.toString());
+    conf.setStrings(DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserIpConfKey(superUserShortName), builder.toString());
   }
 }

+ 2 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java

@@ -496,6 +496,8 @@ public class TestRPC {
       caught = true;
     }
     assertTrue(caught);
+    rb = getMetrics(server.rpcDetailedMetrics.name());
+    assertCounter("IOExceptionNumOps", 1L, rb);
 
     proxy.testServerGet();
 
 

+ 6 - 6
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java

@@ -60,12 +60,12 @@ public class TestGangliaMetrics {
   @Test
   public void testTagsForPrefix() throws Exception {
     ConfigBuilder cb = new ConfigBuilder()
-      .add("Test.sink.ganglia.tagsForPrefix.all", "*")
-      .add("Test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, " +
+      .add("test.sink.ganglia.tagsForPrefix.all", "*")
+      .add("test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, " +
               "NumActiveSources")
-      .add("Test.sink.ganglia.tagsForPrefix.none", "");
+      .add("test.sink.ganglia.tagsForPrefix.none", "");
     GangliaSink30 sink = new GangliaSink30();
-    sink.init(cb.subset("Test.sink.ganglia"));
+    sink.init(cb.subset("test.sink.ganglia"));
 
     List<MetricsTag> tags = new ArrayList<MetricsTag>();
     tags.add(new MetricsTag(MsInfo.Context, "all"));
@@ -98,8 +98,8 @@ public class TestGangliaMetrics {
   
   
   @Test public void testGangliaMetrics2() throws Exception {
     ConfigBuilder cb = new ConfigBuilder().add("default.period", 10)
-        .add("Test.sink.gsink30.context", "test") // filter out only "test"
-        .add("Test.sink.gsink31.context", "test") // filter out only "test"
+        .add("test.sink.gsink30.context", "test") // filter out only "test"
+        .add("test.sink.gsink31.context", "test") // filter out only "test"
         .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
 
     MetricsSystemImpl ms = new MetricsSystemImpl("Test");

+ 34 - 16
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java

@@ -88,11 +88,11 @@ public class TestMetricsSystemImpl {
     DefaultMetricsSystem.shutdown();
     new ConfigBuilder().add("*.period", 8)
         //.add("test.sink.plugin.urls", getPluginUrlsAsString())
-        .add("Test.sink.test.class", TestSink.class.getName())
-        .add("Test.*.source.filter.exclude", "s0")
-        .add("Test.source.s1.metric.filter.exclude", "X*")
-        .add("Test.sink.sink1.metric.filter.exclude", "Y*")
-        .add("Test.sink.sink2.metric.filter.exclude", "Y*")
+        .add("test.sink.test.class", TestSink.class.getName())
+        .add("test.*.source.filter.exclude", "s0")
+        .add("test.source.s1.metric.filter.exclude", "X*")
+        .add("test.sink.sink1.metric.filter.exclude", "Y*")
+        .add("test.sink.sink2.metric.filter.exclude", "Y*")
         .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
     MetricsSystemImpl ms = new MetricsSystemImpl("Test");
     ms.start();
@@ -130,11 +130,11 @@ public class TestMetricsSystemImpl {
     DefaultMetricsSystem.shutdown(); 
     new ConfigBuilder().add("*.period", 8)
         //.add("test.sink.plugin.urls", getPluginUrlsAsString())
-        .add("Test.sink.test.class", TestSink.class.getName())
-        .add("Test.*.source.filter.exclude", "s0")
-        .add("Test.source.s1.metric.filter.exclude", "X*")
-        .add("Test.sink.sink1.metric.filter.exclude", "Y*")
-        .add("Test.sink.sink2.metric.filter.exclude", "Y*")
+        .add("test.sink.test.class", TestSink.class.getName())
+        .add("test.*.source.filter.exclude", "s0")
+        .add("test.source.s1.metric.filter.exclude", "X*")
+        .add("test.sink.sink1.metric.filter.exclude", "Y*")
+        .add("test.sink.sink2.metric.filter.exclude", "Y*")
         .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
     MetricsSystemImpl ms = new MetricsSystemImpl("Test");
     ms.start();
@@ -169,13 +169,14 @@ public class TestMetricsSystemImpl {
   @Test public void testMultiThreadedPublish() throws Exception {
     final int numThreads = 10;
     new ConfigBuilder().add("*.period", 80)
-      .add("Test.sink.Collector." + MetricsConfig.QUEUE_CAPACITY_KEY,
+      .add("test.sink.collector." + MetricsConfig.QUEUE_CAPACITY_KEY,
               numThreads)
       .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
     final MetricsSystemImpl ms = new MetricsSystemImpl("Test");
     ms.start();
+
     final CollectingSink sink = new CollectingSink(numThreads);
-    ms.registerSink("Collector",
+    ms.registerSink("collector",
         "Collector of values from all threads.", sink);
     final TestSource[] sources = new TestSource[numThreads];
     final Thread[] threads = new Thread[numThreads];
@@ -280,10 +281,10 @@ public class TestMetricsSystemImpl {
 
 
   @Test public void testHangingSink() {
     new ConfigBuilder().add("*.period", 8)
-      .add("Test.sink.test.class", TestSink.class.getName())
-      .add("Test.sink.hanging.retry.delay", "1")
-      .add("Test.sink.hanging.retry.backoff", "1.01")
-      .add("Test.sink.hanging.retry.count", "0")
+      .add("test.sink.test.class", TestSink.class.getName())
+      .add("test.sink.hanging.retry.delay", "1")
+      .add("test.sink.hanging.retry.backoff", "1.01")
+      .add("test.sink.hanging.retry.count", "0")
       .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
     MetricsSystemImpl ms = new MetricsSystemImpl("Test");
     ms.start();
@@ -379,6 +380,23 @@ public class TestMetricsSystemImpl {
     ms.shutdown();
   }
 
 
+  @Test public void testUnregisterSource() {
+    MetricsSystem ms = new MetricsSystemImpl();
+    TestSource ts1 = new TestSource("ts1");
+    TestSource ts2 = new TestSource("ts2");
+    ms.register("ts1", "", ts1);
+    ms.register("ts2", "", ts2);
+    MetricsSource s1 = ms.getSource("ts1");
+    assertNotNull(s1);
+    // should work when metrics system is not started
+    ms.unregisterSource("ts1");
+    s1 = ms.getSource("ts1");
+    assertNull(s1);
+    MetricsSource s2 = ms.getSource("ts2");
+    assertNotNull(s2);
+    ms.shutdown();
+  }
+
   private void checkMetricsRecords(List<MetricsRecord> recs) {
     LOG.debug(recs);
     MetricsRecord r = recs.get(0);

+ 4 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java

@@ -105,7 +105,7 @@ public class TestNetworkTopologyWithNodeGroup {
     testNodes[2] = dataNodes[3];
     testNodes[3] = dataNodes[0];
     cluster.sortByDistance(dataNodes[0], testNodes,
-        testNodes.length, 0xDEADBEEF);
+        testNodes.length, 0xDEADBEEF, false);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[2]);
@@ -117,7 +117,7 @@ public class TestNetworkTopologyWithNodeGroup {
     testNodes[2] = dataNodes[1];
     testNodes[3] = dataNodes[0];
     cluster.sortByDistance(dataNodes[0], testNodes,
-        testNodes.length, 0xDEADBEEF);
+        testNodes.length, 0xDEADBEEF, false);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
 
 
@@ -127,7 +127,7 @@ public class TestNetworkTopologyWithNodeGroup {
     testNodes[2] = dataNodes[2];
     testNodes[3] = dataNodes[0];
     cluster.sortByDistance(dataNodes[0], testNodes,
-        testNodes.length, 0xDEADBEEF);
+        testNodes.length, 0xDEADBEEF, false);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[2]);
 
 
@@ -137,7 +137,7 @@ public class TestNetworkTopologyWithNodeGroup {
     testNodes[2] = dataNodes[2];
     testNodes[3] = dataNodes[0];
     cluster.sortByDistance(computeNode, testNodes,
-        testNodes.length, 0xDEADBEEF);
+        testNodes.length, 0xDEADBEEF, false);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[2]);
   }

+ 14 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java

@@ -101,7 +101,8 @@ public class TestDoAsEffectiveUser {
     builder.append("127.0.1.1,");
     builder.append(InetAddress.getLocalHost().getCanonicalHostName());
     LOG.info("Local Ip addresses: "+builder.toString());
-    conf.setStrings(DefaultImpersonationProvider.getProxySuperuserIpConfKey(superUserShortName),
+    conf.setStrings(DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserIpConfKey(superUserShortName),
         builder.toString());
   }
   
   
@@ -181,8 +182,8 @@ public class TestDoAsEffectiveUser {
   @Test(timeout=4000)
   public void testRealUserSetup() throws IOException {
     final Configuration conf = new Configuration();
-    conf.setStrings(DefaultImpersonationProvider
-        .getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
+    conf.setStrings(DefaultImpersonationProvider.getTestProvider().
+        getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
     configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
@@ -214,7 +215,8 @@ public class TestDoAsEffectiveUser {
   public void testRealUserAuthorizationSuccess() throws IOException {
     final Configuration conf = new Configuration();
     configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
-    conf.setStrings(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
+    conf.setStrings(DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
         "group1");
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
@@ -248,9 +250,11 @@ public class TestDoAsEffectiveUser {
   @Test
   public void testRealUserIPAuthorizationFailure() throws IOException {
     final Configuration conf = new Configuration();
-    conf.setStrings(DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_SHORT_NAME),
+    conf.setStrings(DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserIpConfKey(REAL_USER_SHORT_NAME),
         "20.20.20.20"); //Authorized IP address
-    conf.setStrings(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
+    conf.setStrings(DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
         "group1");
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
@@ -293,8 +297,8 @@ public class TestDoAsEffectiveUser {
   @Test
   public void testRealUserIPNotSpecified() throws IOException {
     final Configuration conf = new Configuration();
-    conf.setStrings(DefaultImpersonationProvider
-        .getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
+    conf.setStrings(DefaultImpersonationProvider.getTestProvider().
+        getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
         .setNumHandlers(2).setVerbose(false).build();
@@ -377,7 +381,8 @@ public class TestDoAsEffectiveUser {
   public void testRealUserGroupAuthorizationFailure() throws IOException {
     final Configuration conf = new Configuration();
     configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
-    conf.setStrings(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
+    conf.setStrings(DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
         "group3");
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)

+ 56 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java

@@ -26,8 +26,11 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.FakeTimer;
 import org.junit.Before;
 import org.junit.Test;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
@@ -94,6 +97,9 @@ public class TestGroupsCaching {
 
 
   @Test
   public void testGroupsCaching() throws Exception {
+    // Disable negative cache.
+    conf.setLong(
+        CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 0);
     Groups groups = new Groups(conf);
     groups.cacheGroupsAdd(Arrays.asList(myGroups));
     groups.refresh();
@@ -163,4 +169,54 @@ public class TestGroupsCaching {
         FakeunPrivilegedGroupMapping.invoked);
 
   }
+
+  @Test
+  public void testNegativeGroupCaching() throws Exception {
+    final String user = "negcache";
+    final String failMessage = "Did not throw IOException: ";
+    conf.setLong(
+        CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 2);
+    FakeTimer timer = new FakeTimer();
+    Groups groups = new Groups(conf, timer);
+    groups.cacheGroupsAdd(Arrays.asList(myGroups));
+    groups.refresh();
+    FakeGroupMapping.addToBlackList(user);
+
+    // In the first attempt, the user will be put in the negative cache.
+    try {
+      groups.getGroups(user);
+      fail(failMessage + "Failed to obtain groups from FakeGroupMapping.");
+    } catch (IOException e) {
+      // Expects to raise exception for the first time. But the user will be
+      // put into the negative cache
+      GenericTestUtils.assertExceptionContains("No groups found for user", e);
+    }
+
+    // The second time, the user is in the negative cache.
+    try {
+      groups.getGroups(user);
+      fail(failMessage + "The user is in the negative cache.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("No groups found for user", e);
+    }
+
+    // Brings back the backend user-group mapping service.
+    FakeGroupMapping.clearBlackList();
+
+    // It should still get groups from the negative cache.
+    try {
+      groups.getGroups(user);
+      fail(failMessage + "The user is still in the negative cache, even " +
+          "FakeGroupMapping has resumed.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("No groups found for user", e);
+    }
+
+    // Let the elements in the negative cache expire.
+    timer.advance(4 * 1000);
+
+    // The groups for the user is expired in the negative cache, a new copy of
+    // groups for the user is fetched.
+    assertEquals(Arrays.asList(myGroups), groups.getGroups(user));
+  }
 }
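
The negative-caching test above introduces both FakeTimer and the HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS key; the condensed sketch below restates the behaviour it asserts. The 30-second window and the "nosuchuser" name are illustrative assumptions, and whether a given backend actually fails the first lookup depends on the group mapping in use.

// Condensed sketch of the negative-cache contract the test asserts: a failed
// group lookup is remembered and keeps failing fast until the window expires.
// The 30-second window and "nosuchuser" are illustrative assumptions.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.util.FakeTimer;

public class NegativeGroupCacheSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setLong(
        CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 30);
    FakeTimer timer = new FakeTimer();   // manually advanced test clock
    Groups groups = new Groups(conf, timer);
    try {
      groups.getGroups("nosuchuser");    // fails and lands in the negative cache
    } catch (IOException expected) {
      // repeated lookups now fail fast from the cache...
    }
    timer.advance(31 * 1000);            // ...until the entry expires
  }
}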

+ 18 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java

@@ -127,6 +127,22 @@ public class TestCredShell {
     		"CredentialProviders configured."));
   }
   
   
+  @Test
+  public void testPromptForCredentialWithEmptyPasswd() throws Exception {
+    String[] args1 = {"create", "credential1", "--provider", 
+        "jceks://file" + tmpDir + "/credstore.jceks"};
+    ArrayList<String> passwords = new ArrayList<String>();
+    passwords.add(null);
+    passwords.add("p@ssw0rd");
+    int rc = 0;
+    CredentialShell shell = new CredentialShell();
+    shell.setConf(new Configuration());
+    shell.setPasswordReader(new MockPasswordReader(passwords));
+    rc = shell.run(args1);
+    assertEquals(outContent.toString(), -1, rc);
+    assertTrue(outContent.toString().contains("Passwords don't match"));
+  }
+
   @Test
   public void testPromptForCredential() throws Exception {
     String[] args1 = {"create", "credential1", "--provider", 
@@ -142,7 +158,7 @@ public class TestCredShell {
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains("credential1 has been successfully " +
         "created."));
-
+    
     String[] args2 = {"delete", "credential1", "--provider", 
         "jceks://file" + tmpDir + "/credstore.jceks"};
     rc = shell.run(args2);
@@ -162,7 +178,7 @@ public class TestCredShell {
     public char[] readPassword(String prompt) {
       if (passwords.size() == 0) return null;
       String pass = passwords.remove(0);
-      return pass.toCharArray();
+      return pass == null ? null : pass.toCharArray();
     }
 
     @Override

+ 100 - 26
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java

@@ -111,10 +111,12 @@ public class TestProxyUsers {
       groupMappingClassName);
 
     conf.set(
-        DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
+        DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserGroupConfKey(REAL_USER_NAME),
         StringUtils.join(",", Arrays.asList(NETGROUP_NAMES)));
     conf.set(
-        DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
+        DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserIpConfKey(REAL_USER_NAME),
         PROXY_IP);
     
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@@ -135,10 +137,12 @@ public class TestProxyUsers {
   public void testProxyUsers() throws Exception {
     Configuration conf = new Configuration();
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider().
+          getProxySuperuserGroupConfKey(REAL_USER_NAME),
       StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider().
+          getProxySuperuserIpConfKey(REAL_USER_NAME),
       PROXY_IP);
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
 
 
@@ -168,10 +172,12 @@ public class TestProxyUsers {
   public void testProxyUsersWithUserConf() throws Exception {
     Configuration conf = new Configuration();
     conf.set(
-        DefaultImpersonationProvider.getProxySuperuserUserConfKey(REAL_USER_NAME),
+        DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserUserConfKey(REAL_USER_NAME),
         StringUtils.join(",", Arrays.asList(AUTHORIZED_PROXY_USER_NAME)));
     conf.set(
-        DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
+        DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserIpConfKey(REAL_USER_NAME),
         PROXY_IP);
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
 
 
@@ -202,10 +208,12 @@ public class TestProxyUsers {
   public void testWildcardGroup() {
     Configuration conf = new Configuration();
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider().
+          getProxySuperuserGroupConfKey(REAL_USER_NAME),
       "*");
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider().
+          getProxySuperuserIpConfKey(REAL_USER_NAME),
       PROXY_IP);
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
 
 
@@ -236,10 +244,12 @@ public class TestProxyUsers {
   public void testWildcardUser() {
     Configuration conf = new Configuration();
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserUserConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider().
+          getProxySuperuserUserConfKey(REAL_USER_NAME),
       "*");
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider().
+          getProxySuperuserIpConfKey(REAL_USER_NAME),
       PROXY_IP);
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
 
 
@@ -270,10 +280,12 @@ public class TestProxyUsers {
   public void testWildcardIP() {
     Configuration conf = new Configuration();
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider().
+          getProxySuperuserGroupConfKey(REAL_USER_NAME),
       StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider().
+          getProxySuperuserIpConfKey(REAL_USER_NAME),
       "*");
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
 
 
@@ -301,10 +313,12 @@ public class TestProxyUsers {
   public void testIPRange() {
     Configuration conf = new Configuration();
     conf.set(
-        DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
+        DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserGroupConfKey(REAL_USER_NAME),
         "*");
     conf.set(
-        DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
+        DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserIpConfKey(REAL_USER_NAME),
         PROXY_IP_RANGE);
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
 
 
@@ -324,16 +338,19 @@ public class TestProxyUsers {
   public void testWithDuplicateProxyGroups() throws Exception {
     Configuration conf = new Configuration();
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider().
+          getProxySuperuserGroupConfKey(REAL_USER_NAME),
       StringUtils.join(",", Arrays.asList(GROUP_NAMES,GROUP_NAMES)));
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider().
+          getProxySuperuserIpConfKey(REAL_USER_NAME),
       PROXY_IP);
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
     
     Collection<String> groupsToBeProxied = 
         ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get(
-        DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME));
+        DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserGroupConfKey(REAL_USER_NAME));
     
     assertEquals (1,groupsToBeProxied.size());
   }
@@ -342,16 +359,19 @@ public class TestProxyUsers {
   public void testWithDuplicateProxyHosts() throws Exception {
     Configuration conf = new Configuration();
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider()
+          .getProxySuperuserGroupConfKey(REAL_USER_NAME),
       StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider().
+          getProxySuperuserIpConfKey(REAL_USER_NAME),
       StringUtils.join(",", Arrays.asList(PROXY_IP,PROXY_IP)));
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
     
     Collection<String> hosts = 
         ProxyUsers.getDefaultImpersonationProvider().getProxyHosts().get(
-        DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME));
+        DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserIpConfKey(REAL_USER_NAME));
     
     assertEquals (1,hosts.size());
   }
@@ -391,26 +411,73 @@ public class TestProxyUsers {
   public void testWithProxyGroupsAndUsersWithSpaces() throws Exception {
     Configuration conf = new Configuration();
     conf.set(
-        DefaultImpersonationProvider.getProxySuperuserUserConfKey(REAL_USER_NAME),
+        DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserUserConfKey(REAL_USER_NAME),
         StringUtils.join(",", Arrays.asList(PROXY_USER_NAME + " ",AUTHORIZED_PROXY_USER_NAME, "ONEMORE")));
 
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider().
+          getProxySuperuserGroupConfKey(REAL_USER_NAME),
       StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
     
     conf.set(
-      DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
+      DefaultImpersonationProvider.getTestProvider().
+          getProxySuperuserIpConfKey(REAL_USER_NAME),
       PROXY_IP);
     
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
     
     Collection<String> groupsToBeProxied = 
         ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get(
-        DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME));
+        DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserGroupConfKey(REAL_USER_NAME));
     
     assertEquals (GROUP_NAMES.length, groupsToBeProxied.size());
   }
 
 
+  @Test(expected = IllegalArgumentException.class)
+  public void testProxyUsersWithNullPrefix() throws Exception {
+    ProxyUsers.refreshSuperUserGroupsConfiguration(new Configuration(false), 
+        null);
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testProxyUsersWithEmptyPrefix() throws Exception {
+    ProxyUsers.refreshSuperUserGroupsConfiguration(new Configuration(false), 
+        "");
+  }
+
+  @Test
+  public void testProxyUsersWithCustomPrefix() throws Exception {
+    Configuration conf = new Configuration(false);
+    conf.set("x." + REAL_USER_NAME + ".users",
+        StringUtils.join(",", Arrays.asList(AUTHORIZED_PROXY_USER_NAME)));
+    conf.set("x." + REAL_USER_NAME+ ".hosts", PROXY_IP);
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf, "x");
+
+
+    // First try proxying a user that's allowed
+    UserGroupInformation realUserUgi = UserGroupInformation
+        .createRemoteUser(REAL_USER_NAME);
+    UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
+        AUTHORIZED_PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
+
+    // From good IP
+    assertAuthorized(proxyUserUgi, "1.2.3.4");
+    // From bad IP
+    assertNotAuthorized(proxyUserUgi, "1.2.3.5");
+
+    // Now try proxying a user that's not allowed
+    realUserUgi = UserGroupInformation.createRemoteUser(REAL_USER_NAME);
+    proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
+        PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
+
+    // From good IP
+    assertNotAuthorized(proxyUserUgi, "1.2.3.4");
+    // From bad IP
+    assertNotAuthorized(proxyUserUgi, "1.2.3.5");
+  }
+
 
 
   private void assertNotAuthorized(UserGroupInformation proxyUgi, String host) {
   private void assertNotAuthorized(UserGroupInformation proxyUgi, String host) {
     try {
     try {
@@ -430,6 +497,11 @@ public class TestProxyUsers {
   }
   }
 
 
   static class TestDummyImpersonationProvider implements ImpersonationProvider {
   static class TestDummyImpersonationProvider implements ImpersonationProvider {
+
+    @Override
+    public void init(String configurationPrefix) {
+    }
+
     /**
     /**
      * Authorize a user (superuser) to impersonate another user (user1) if the 
      * Authorize a user (superuser) to impersonate another user (user1) if the 
      * superuser belongs to the group "sudo_user1" .
      * superuser belongs to the group "sudo_user1" .
@@ -460,11 +532,13 @@ public class TestProxyUsers {
   public static void loadTest(String ipString, int testRange) {
   public static void loadTest(String ipString, int testRange) {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     conf.set(
     conf.set(
-        DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
+        DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserGroupConfKey(REAL_USER_NAME),
         StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
         StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
 
 
     conf.set(
     conf.set(
-        DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
+        DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserIpConfKey(REAL_USER_NAME),
         ipString
         ipString
         );
         );
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
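
The new testProxyUsersWithCustomPrefix case above exercises the prefix-aware refresh overload. As a rough sketch only (the "myservice" prefix, user names and IP below are made up for illustration, not part of the patch), a caller could scope its impersonation settings under its own configuration prefix like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.ProxyUsers;

public class CustomPrefixProxyConfig {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(false);
    // Hypothetical service keeping impersonation settings under "myservice.*"
    conf.set("myservice.hue.users", "alice,bob");  // users 'hue' may impersonate
    conf.set("myservice.hue.hosts", "10.0.0.1");   // hosts 'hue' may connect from
    // Load the proxy-user ACLs from the custom prefix instead of the
    // default "hadoop.proxyuser" prefix.
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf, "myservice");
  }
}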

+ 52 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java

@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * FakeTimer can be used for test purposes to control the return values
+ * from {@link Timer}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class FakeTimer extends Timer {
+  private long nowMillis;
+
+  /** Constructs a FakeTimer with a non-zero value */
+  public FakeTimer() {
+    nowMillis = 1000;  // Initialize with a non-trivial value.
+  }
+
+  @Override
+  public long now() {
+    return nowMillis;
+  }
+
+  @Override
+  public long monotonicNow() {
+    return nowMillis;
+  }
+
+  /** Increases the current time by the given number of milliseconds. */
+  public void advance(long advMillis) {
+    nowMillis += advMillis;
+  }
+}
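
FakeTimer is meant to be injected wherever production code accepts a Timer, so tests can advance time deterministically instead of sleeping. A minimal sketch of how it behaves, based only on the methods shown above:

import org.apache.hadoop.util.FakeTimer;

public class FakeTimerExample {
  public static void main(String[] args) {
    FakeTimer timer = new FakeTimer();
    long start = timer.monotonicNow();  // 1000 by construction
    timer.advance(5000);                // pretend five seconds passed
    long elapsed = timer.monotonicNow() - start;
    System.out.println("elapsed ms: " + elapsed);  // prints 5000
  }
}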

+ 15 - 0
hadoop-common-project/hadoop-kms/src/main/conf/kms-acls.xml

@@ -79,4 +79,19 @@
     </description>
     </description>
   </property>
   </property>
 
 
+  <property>
+    <name>hadoop.kms.acl.GENERATE_EEK</name>
+    <value>*</value>
+    <description>
+      ACL for generateEncryptedKey CryptoExtension operations
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.DECRYPT_EEK</name>
+    <value>*</value>
+    <description>
+      ACL for decryptEncryptedKey CryptoExtension operations
+    </description>
+  </property>
 </configuration>
 </configuration>

+ 149 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/EagerKeyGeneratorKeyProviderCryptoExtension.java

@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.crypto.key.kms.server;
+
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import java.util.concurrent.ExecutionException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.crypto.key.kms.ValueQueue;
+import org.apache.hadoop.crypto.key.kms.ValueQueue.SyncGenerationPolicy;
+
+/**
+ * A {@link KeyProviderCryptoExtension} that pre-generates and caches encrypted 
+ * keys.
+ */
+@InterfaceAudience.Private
+public class EagerKeyGeneratorKeyProviderCryptoExtension 
+    extends KeyProviderCryptoExtension {
+
+  private static final String KEY_CACHE_PREFIX =
+      "hadoop.security.kms.encrypted.key.cache.";
+
+  public static final String KMS_KEY_CACHE_SIZE =
+      KEY_CACHE_PREFIX + "size";
+  public static final int KMS_KEY_CACHE_SIZE_DEFAULT = 100;
+
+  public static final String KMS_KEY_CACHE_LOW_WATERMARK =
+      KEY_CACHE_PREFIX + "low.watermark";
+  public static final float KMS_KEY_CACHE_LOW_WATERMARK_DEFAULT = 0.30f;
+
+  public static final String KMS_KEY_CACHE_EXPIRY_MS =
+      KEY_CACHE_PREFIX + "expiry";
+  public static final int KMS_KEY_CACHE_EXPIRY_DEFAULT = 43200000;
+
+  public static final String KMS_KEY_CACHE_NUM_REFILL_THREADS =
+      KEY_CACHE_PREFIX + "num.fill.threads";
+  public static final int KMS_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT = 2;
+
+
+  private static class CryptoExtension 
+      implements KeyProviderCryptoExtension.CryptoExtension {
+
+    private class EncryptedQueueRefiller implements
+        ValueQueue.QueueRefiller<EncryptedKeyVersion> {
+
+      @Override
+      public void fillQueueForKey(String keyName,
+          Queue<EncryptedKeyVersion> keyQueue, int numKeys) throws IOException {
+        List<EncryptedKeyVersion> retEdeks =
+            new LinkedList<EncryptedKeyVersion>();
+        for (int i = 0; i < numKeys; i++) {
+          try {
+            retEdeks.add(keyProviderCryptoExtension.generateEncryptedKey(
+                keyName));
+          } catch (GeneralSecurityException e) {
+            throw new IOException(e);
+          }
+        }
+        keyQueue.addAll(retEdeks);
+      }
+    }
+
+    private KeyProviderCryptoExtension keyProviderCryptoExtension;
+    private final ValueQueue<EncryptedKeyVersion> encKeyVersionQueue;
+
+    public CryptoExtension(Configuration conf, 
+        KeyProviderCryptoExtension keyProviderCryptoExtension) {
+      this.keyProviderCryptoExtension = keyProviderCryptoExtension;
+      encKeyVersionQueue =
+          new ValueQueue<KeyProviderCryptoExtension.EncryptedKeyVersion>(
+              conf.getInt(KMS_KEY_CACHE_SIZE,
+                  KMS_KEY_CACHE_SIZE_DEFAULT),
+              conf.getFloat(KMS_KEY_CACHE_LOW_WATERMARK,
+                  KMS_KEY_CACHE_LOW_WATERMARK_DEFAULT),
+              conf.getInt(KMS_KEY_CACHE_EXPIRY_MS,
+                  KMS_KEY_CACHE_EXPIRY_DEFAULT),
+              conf.getInt(KMS_KEY_CACHE_NUM_REFILL_THREADS,
+                  KMS_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
+              SyncGenerationPolicy.LOW_WATERMARK, new EncryptedQueueRefiller()
+          );
+    }
+
+    @Override
+    public void warmUpEncryptedKeys(String... keyNames) throws
+                                                        IOException {
+      try {
+        encKeyVersionQueue.initializeQueuesForKeys(keyNames);
+      } catch (ExecutionException e) {
+        throw new IOException(e);
+      }
+    }
+
+    @Override
+    public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
+        throws IOException, GeneralSecurityException {
+      try {
+        return encKeyVersionQueue.getNext(encryptionKeyName);
+      } catch (ExecutionException e) {
+        throw new IOException(e);
+      }
+    }
+
+    @Override
+    public KeyVersion
+    decryptEncryptedKey(EncryptedKeyVersion encryptedKeyVersion)
+        throws IOException, GeneralSecurityException {
+      return keyProviderCryptoExtension.decryptEncryptedKey(
+          encryptedKeyVersion);
+    }
+  }
+
+  /**
+   * This class is a proxy for a <code>KeyProviderCryptoExtension</code> that
+   * decorates the underlying <code>CryptoExtension</code> with one that eagerly
+   * caches pre-generated Encrypted Keys using a <code>ValueQueue</code>
+   * 
+   * @param conf Configuration object to load parameters from
+   * @param keyProviderCryptoExtension <code>KeyProviderCryptoExtension</code>
+   * to delegate calls to.
+   */
+  public EagerKeyGeneratorKeyProviderCryptoExtension(Configuration conf,
+      KeyProviderCryptoExtension keyProviderCryptoExtension) {
+    super(keyProviderCryptoExtension, 
+        new CryptoExtension(conf, keyProviderCryptoExtension));
+  }
+
+}
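
Taken together with the ValueQueue added in hadoop-common, the decoration order used by the KMS web app (shown later in KMSWebApp.java) is roughly: base provider, optional CachingKeyProvider, KeyProviderCryptoExtension, then EagerKeyGeneratorKeyProviderCryptoExtension. A hedged sketch of that wiring, assuming a hypothetical JCEKS keystore URI and the delegating warmUpEncryptedKeys helper on KeyProviderCryptoExtension:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.crypto.key.kms.server.EagerKeyGeneratorKeyProviderCryptoExtension;

public class EagerEekSetup {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical backing store; any KeyProvider works here.
    KeyProvider base = KeyProviderFactory.get(
        new java.net.URI("jceks://file/tmp/kms.keystore"), conf);
    KeyProviderCryptoExtension ext =
        KeyProviderCryptoExtension.createKeyProviderCryptoExtension(base);
    // Decorate with the eager generator so generateEncryptedKey() is served
    // from a pre-filled queue instead of hitting the crypto path every time.
    KeyProviderCryptoExtension eager =
        new EagerKeyGeneratorKeyProviderCryptoExtension(conf, ext);
    eager.warmUpEncryptedKeys("key1");  // optional pre-fill of the queue
    System.out.println(
        eager.generateEncryptedKey("key1").getEncryptionKeyVersionName());
  }
}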

+ 96 - 1
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
 import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
@@ -29,6 +31,7 @@ import org.apache.hadoop.util.StringUtils;
 
 
 import javax.ws.rs.Consumes;
 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
 import javax.ws.rs.DELETE;
+import javax.ws.rs.DefaultValue;
 import javax.ws.rs.GET;
 import javax.ws.rs.GET;
 import javax.ws.rs.POST;
 import javax.ws.rs.POST;
 import javax.ws.rs.Path;
 import javax.ws.rs.Path;
@@ -39,10 +42,14 @@ import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.SecurityContext;
 import javax.ws.rs.core.SecurityContext;
+
+import java.io.IOException;
 import java.net.URI;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URISyntaxException;
 import java.security.Principal;
 import java.security.Principal;
 import java.text.MessageFormat;
 import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.List;
 import java.util.Map;
 import java.util.Map;
 
 
@@ -61,8 +68,10 @@ public class KMS {
   private static final String GET_CURRENT_KEY = "GET_CURRENT_KEY";
   private static final String GET_CURRENT_KEY = "GET_CURRENT_KEY";
   private static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS";
   private static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS";
   private static final String GET_METADATA = "GET_METADATA";
   private static final String GET_METADATA = "GET_METADATA";
+  private static final String GENERATE_EEK = "GENERATE_EEK";
+  private static final String DECRYPT_EEK = "DECRYPT_EEK";
 
 
-  private KeyProvider provider;
+  private KeyProviderCryptoExtension provider;
 
 
   public KMS() throws Exception {
   public KMS() throws Exception {
     provider = KMSWebApp.getKeyProvider();
     provider = KMSWebApp.getKeyProvider();
@@ -289,6 +298,92 @@ public class KMS {
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
   }
 
 
+  @SuppressWarnings({ "rawtypes", "unchecked" })
+  @GET
+  @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
+      KMSRESTConstants.EEK_SUB_RESOURCE)
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response generateEncryptedKeys(
+          @Context SecurityContext securityContext,
+          @PathParam("name") String name,
+          @QueryParam(KMSRESTConstants.EEK_OP) String edekOp,
+          @DefaultValue("1")
+          @QueryParam(KMSRESTConstants.EEK_NUM_KEYS) int numKeys)
+          throws Exception {
+    Principal user = getPrincipal(securityContext);
+    KMSClientProvider.checkNotEmpty(name, "name");
+    KMSClientProvider.checkNotNull(edekOp, "eekOp");
+
+    Object retJSON;
+    if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) {
+      assertAccess(KMSACLs.Type.GENERATE_EEK, user, GENERATE_EEK, name);
+
+      List<EncryptedKeyVersion> retEdeks =
+          new LinkedList<EncryptedKeyVersion>();
+      try {
+        for (int i = 0; i < numKeys; i ++) {
+          retEdeks.add(provider.generateEncryptedKey(name));
+        }
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+      KMSAudit.ok(user, GENERATE_EEK, name, "");
+      retJSON = new ArrayList();
+      for (EncryptedKeyVersion edek : retEdeks) {
+        ((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek));
+      }
+    } else {
+      throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
+          " value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
+          KMSRESTConstants.EEK_DECRYPT);
+    }
+    KMSWebApp.getGenerateEEKCallsMeter().mark();
+    return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
+        .build();
+  }
+
+  @SuppressWarnings("rawtypes")
+  @POST
+  @Path(KMSRESTConstants.KEY_VERSION_RESOURCE + "/{versionName:.*}/" +
+      KMSRESTConstants.EEK_SUB_RESOURCE)
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response decryptEncryptedKey(@Context SecurityContext securityContext,
+      @PathParam("versionName") String versionName,
+      @QueryParam(KMSRESTConstants.EEK_OP) String eekOp,
+      Map jsonPayload)
+      throws Exception {
+    Principal user = getPrincipal(securityContext);
+    KMSClientProvider.checkNotEmpty(versionName, "versionName");
+    KMSClientProvider.checkNotNull(eekOp, "eekOp");
+
+    String keyName = (String) jsonPayload.get(KMSRESTConstants.NAME_FIELD);
+    String ivStr = (String) jsonPayload.get(KMSRESTConstants.IV_FIELD);
+    String encMaterialStr = 
+        (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
+    Object retJSON;
+    if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
+      assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, versionName);
+      KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
+      byte[] iv = Base64.decodeBase64(ivStr);
+      KMSClientProvider.checkNotNull(encMaterialStr,
+          KMSRESTConstants.MATERIAL_FIELD);
+      byte[] encMaterial = Base64.decodeBase64(encMaterialStr);
+      KeyProvider.KeyVersion retKeyVersion =
+          provider.decryptEncryptedKey(
+              new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName,
+                  iv, KeyProviderCryptoExtension.EEK, encMaterial));
+      retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
+      KMSAudit.ok(user, DECRYPT_EEK, versionName, "");
+    } else {
+      throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
+          " value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
+          KMSRESTConstants.EEK_DECRYPT);
+    }
+    KMSWebApp.getDecryptEEKCallsMeter().mark();
+    return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
+        .build();
+  }
+
   @GET
   @GET
   @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
   @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
       KMSRESTConstants.VERSIONS_SUB_RESOURCE)
       KMSRESTConstants.VERSIONS_SUB_RESOURCE)

+ 12 - 24
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java

@@ -17,6 +17,7 @@
  */
  */
 package org.apache.hadoop.crypto.key.kms.server;
 package org.apache.hadoop.crypto.key.kms.server;
 
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.AccessControlList;
@@ -28,20 +29,20 @@ import java.util.Map;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 
 /**
 /**
  * Provides access to the <code>AccessControlList</code>s used by KMS,
  * Provides access to the <code>AccessControlList</code>s used by KMS,
  * hot-reloading them if the <code>kms-acls.xml</code> file where the ACLs
  * hot-reloading them if the <code>kms-acls.xml</code> file where the ACLs
  * are defined has been updated.
  * are defined has been updated.
  */
  */
+@InterfaceAudience.Private
 public class KMSACLs implements Runnable {
 public class KMSACLs implements Runnable {
   private static final Logger LOG = LoggerFactory.getLogger(KMSACLs.class);
   private static final Logger LOG = LoggerFactory.getLogger(KMSACLs.class);
 
 
 
 
   public enum Type {
   public enum Type {
-    CREATE, DELETE, ROLLOVER, GET, GET_KEYS, GET_METADATA, SET_KEY_MATERIAL;
+    CREATE, DELETE, ROLLOVER, GET, GET_KEYS, GET_METADATA,
+    SET_KEY_MATERIAL, GENERATE_EEK, DECRYPT_EEK;
 
 
     public String getConfigKey() {
     public String getConfigKey() {
       return KMSConfiguration.CONFIG_PREFIX + "acl." + this.toString();
       return KMSConfiguration.CONFIG_PREFIX + "acl." + this.toString();
@@ -52,13 +53,11 @@ public class KMSACLs implements Runnable {
 
 
   public static final int RELOADER_SLEEP_MILLIS = 1000;
   public static final int RELOADER_SLEEP_MILLIS = 1000;
 
 
-  Map<Type, AccessControlList> acls;
-  private ReadWriteLock lock;
+  private volatile Map<Type, AccessControlList> acls;
   private ScheduledExecutorService executorService;
   private ScheduledExecutorService executorService;
   private long lastReload;
   private long lastReload;
 
 
   KMSACLs(Configuration conf) {
   KMSACLs(Configuration conf) {
-    lock = new ReentrantReadWriteLock();
     if (conf == null) {
     if (conf == null) {
       conf = loadACLs();
       conf = loadACLs();
     }
     }
@@ -70,17 +69,13 @@ public class KMSACLs implements Runnable {
   }
   }
 
 
   private void setACLs(Configuration conf) {
   private void setACLs(Configuration conf) {
-    lock.writeLock().lock();
-    try {
-      acls = new HashMap<Type, AccessControlList>();
-      for (Type aclType : Type.values()) {
-        String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT);
-        acls.put(aclType, new AccessControlList(aclStr));
-        LOG.info("'{}' ACL '{}'", aclType, aclStr);
-      }
-    } finally {
-      lock.writeLock().unlock();
+    Map<Type, AccessControlList> tempAcls = new HashMap<Type, AccessControlList>();
+    for (Type aclType : Type.values()) {
+      String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT);
+      tempAcls.put(aclType, new AccessControlList(aclStr));
+      LOG.info("'{}' ACL '{}'", aclType, aclStr);
     }
     }
+    acls = tempAcls;
   }
   }
 
 
   @Override
   @Override
@@ -120,14 +115,7 @@ public class KMSACLs implements Runnable {
 
 
   public boolean hasAccess(Type type, String user) {
   public boolean hasAccess(Type type, String user) {
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
-    AccessControlList acl = null;
-    lock.readLock().lock();
-    try {
-      acl = acls.get(type);
-    } finally {
-      lock.readLock().unlock();
-    }
-    return acl.isUserAllowed(ugi);
+    return acls.get(type).isUserAllowed(ugi);
   }
   }
 
 
 }
 }
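
The KMSACLs change above replaces a ReadWriteLock with a volatile reference: the reload path builds a complete new map and publishes it in a single assignment, while readers dereference the field exactly once and never lock. A stripped-down sketch of the same publish pattern (class and field names here are illustrative, not the actual KMS code):

import java.util.HashMap;
import java.util.Map;

public class HotReloadableConfig {
  // Readers see either the old map or the new one, never a half-built map.
  private volatile Map<String, String> settings = new HashMap<String, String>();

  /** Called by a background reload thread. */
  public void reload(Map<String, String> freshValues) {
    Map<String, String> temp = new HashMap<String, String>(freshValues);
    settings = temp;  // a single volatile write publishes the whole snapshot
  }

  /** Called by request-handling threads without any locking. */
  public String get(String key) {
    return settings.get(key);  // a single volatile read
  }
}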

+ 0 - 177
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSCacheKeyProvider.java

@@ -1,177 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.crypto.key.kms.server;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import org.apache.hadoop.crypto.key.KeyProvider;
-
-import java.io.IOException;
-import java.security.NoSuchAlgorithmException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-
-/**
- * A <code>KeyProvider</code> proxy implementation providing a short lived
- * cache for <code>KeyVersions</code> to avoid burst of requests to hit the
- * underlying <code>KeyProvider</code>.
- */
-public class KMSCacheKeyProvider extends KeyProvider {
-  private final KeyProvider provider;
-  private LoadingCache<String, KeyVersion> keyVersionCache;
-  private LoadingCache<String, KeyVersion> currentKeyCache;
-
-  private static class KeyNotFoundException extends Exception {
-    private static final long serialVersionUID = 1L;
-  }
-
-  public KMSCacheKeyProvider(KeyProvider prov, long timeoutMillis) {
-    this.provider =  prov;
-    keyVersionCache = CacheBuilder.newBuilder().expireAfterAccess(timeoutMillis,
-        TimeUnit.MILLISECONDS).build(new CacheLoader<String, KeyVersion>() {
-      @Override
-      public KeyVersion load(String key) throws Exception {
-        KeyVersion kv = provider.getKeyVersion(key);
-        if (kv == null) {
-          throw new KeyNotFoundException();
-        }
-        return kv;
-      }
-    });
-    // for current key we don't want to go stale for more than 1 sec
-    currentKeyCache = CacheBuilder.newBuilder().expireAfterWrite(1000,
-        TimeUnit.MILLISECONDS).build(new CacheLoader<String, KeyVersion>() {
-      @Override
-      public KeyVersion load(String key) throws Exception {
-        KeyVersion kv =  provider.getCurrentKey(key);
-        if (kv == null) {
-          throw new KeyNotFoundException();
-        }
-        return kv;
-      }
-    });
-  }
-
-  @Override
-  public KeyVersion getCurrentKey(String name) throws IOException {
-    try {
-      return currentKeyCache.get(name);
-    } catch (ExecutionException ex) {
-      Throwable cause = ex.getCause();
-      if (cause instanceof KeyNotFoundException) {
-        return null;
-      } else if (cause instanceof IOException) {
-        throw (IOException) cause;
-      } else {
-        throw new IOException(cause);
-      }
-    }
-  }
-
-  @Override
-  public KeyVersion getKeyVersion(String versionName)
-      throws IOException {
-    try {
-      return keyVersionCache.get(versionName);
-    } catch (ExecutionException ex) {
-      Throwable cause = ex.getCause();
-      if (cause instanceof KeyNotFoundException) {
-        return null;
-      } else if (cause instanceof IOException) {
-        throw (IOException) cause;
-      } else {
-        throw new IOException(cause);
-      }
-    }
-  }
-
-  @Override
-  public List<String> getKeys() throws IOException {
-    return provider.getKeys();
-  }
-
-  @Override
-  public List<KeyVersion> getKeyVersions(String name)
-      throws IOException {
-    return provider.getKeyVersions(name);
-  }
-
-  @Override
-  public Metadata getMetadata(String name) throws IOException {
-    return provider.getMetadata(name);
-  }
-
-  @Override
-  public KeyVersion createKey(String name, byte[] material,
-      Options options) throws IOException {
-    return provider.createKey(name, material, options);
-  }
-
-  @Override
-  public KeyVersion createKey(String name,
-      Options options)
-      throws NoSuchAlgorithmException, IOException {
-    return provider.createKey(name, options);
-  }
-
-  @Override
-  public void deleteKey(String name) throws IOException {
-    provider.deleteKey(name);
-    currentKeyCache.invalidate(name);
-    // invalidating all key versions as we don't know which ones belonged to the
-    // deleted key
-    keyVersionCache.invalidateAll();
-  }
-
-  @Override
-  public KeyVersion rollNewVersion(String name, byte[] material)
-      throws IOException {
-    KeyVersion key = provider.rollNewVersion(name, material);
-    currentKeyCache.invalidate(name);
-    return key;
-  }
-
-  @Override
-  public KeyVersion rollNewVersion(String name)
-      throws NoSuchAlgorithmException, IOException {
-    KeyVersion key = provider.rollNewVersion(name);
-    currentKeyCache.invalidate(name);
-    return key;
-  }
-
-  @Override
-  public void flush() throws IOException {
-    provider.flush();
-  }
-
-  @Override
-  public Metadata[] getKeysMetadata(String ... keyNames)
-      throws IOException {
-    return provider.getKeysMetadata(keyNames);
-  }
-
-  @Override
-  public boolean isTransient() {
-    return provider.isTransient();
-  }
-
-}

+ 13 - 1
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java

@@ -34,9 +34,21 @@ public class KMSConfiguration {
 
 
   public static final String CONFIG_PREFIX = "hadoop.kms.";
   public static final String CONFIG_PREFIX = "hadoop.kms.";
 
 
+  // Property to Enable/Disable Caching
+  public static final String KEY_CACHE_ENABLE = CONFIG_PREFIX +
+      "cache.enable";
+  // Timeout for the Key and Metadata Cache
   public static final String KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX +
   public static final String KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX +
       "cache.timeout.ms";
       "cache.timeout.ms";
-  public static final long KEY_CACHE_TIMEOUT_DEFAULT = 10 * 1000; // 10 secs
+  // Timeout for the current key cache
+  public static final String CURR_KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX +
+      "current.key.cache.timeout.ms";
+
+  public static final boolean KEY_CACHE_ENABLE_DEFAULT = true;
+  // 10 mins
+  public static final long KEY_CACHE_TIMEOUT_DEFAULT = 10 * 60 * 1000;
+  // 30 secs
+  public static final long CURR_KEY_CACHE_TIMEOUT_DEFAULT = 30 * 1000;
 
 
   static Configuration getConfiguration(boolean loadHadoopDefaults,
   static Configuration getConfiguration(boolean loadHadoopDefaults,
       String ... resources) {
       String ... resources) {

+ 20 - 1
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSServerJSONUtils.java

@@ -17,8 +17,10 @@
  */
  */
 package org.apache.hadoop.crypto.key.kms.server;
 package org.apache.hadoop.crypto.key.kms.server;
 
 
+import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
 import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
 
 
 import java.util.ArrayList;
 import java.util.ArrayList;
@@ -39,7 +41,9 @@ public class KMSServerJSONUtils {
           keyVersion.getName());
           keyVersion.getName());
       json.put(KMSRESTConstants.VERSION_NAME_FIELD,
       json.put(KMSRESTConstants.VERSION_NAME_FIELD,
           keyVersion.getVersionName());
           keyVersion.getVersionName());
-      json.put(KMSRESTConstants.MATERIAL_FIELD, keyVersion.getMaterial());
+      json.put(KMSRESTConstants.MATERIAL_FIELD,
+          Base64.encodeBase64URLSafeString(
+              keyVersion.getMaterial()));
     }
     }
     return json;
     return json;
   }
   }
@@ -55,6 +59,21 @@ public class KMSServerJSONUtils {
     return json;
     return json;
   }
   }
 
 
+  @SuppressWarnings("unchecked")
+  public static Map toJSON(EncryptedKeyVersion encryptedKeyVersion) {
+    Map json = new LinkedHashMap();
+    if (encryptedKeyVersion != null) {
+      json.put(KMSRESTConstants.VERSION_NAME_FIELD,
+          encryptedKeyVersion.getEncryptionKeyVersionName());
+      json.put(KMSRESTConstants.IV_FIELD,
+          Base64.encodeBase64URLSafeString(
+              encryptedKeyVersion.getEncryptedKeyIv()));
+      json.put(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD,
+          toJSON(encryptedKeyVersion.getEncryptedKeyVersion()));
+    }
+    return json;
+  }
+
   @SuppressWarnings("unchecked")
   @SuppressWarnings("unchecked")
   public static Map toJSON(String keyName, KeyProvider.Metadata meta) {
   public static Map toJSON(String keyName, KeyProvider.Metadata meta) {
     Map json = new LinkedHashMap();
     Map json = new LinkedHashMap();

+ 42 - 8
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java

@@ -20,9 +20,12 @@ package org.apache.hadoop.crypto.key.kms.server;
 import com.codahale.metrics.JmxReporter;
 import com.codahale.metrics.JmxReporter;
 import com.codahale.metrics.Meter;
 import com.codahale.metrics.Meter;
 import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.MetricRegistry;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.CachingKeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.AccessControlList;
@@ -34,6 +37,7 @@ import org.slf4j.bridge.SLF4JBridgeHandler;
 
 
 import javax.servlet.ServletContextEvent;
 import javax.servlet.ServletContextEvent;
 import javax.servlet.ServletContextListener;
 import javax.servlet.ServletContextListener;
+
 import java.io.File;
 import java.io.File;
 import java.net.URL;
 import java.net.URL;
 import java.util.List;
 import java.util.List;
@@ -54,6 +58,10 @@ public class KMSWebApp implements ServletContextListener {
       "unauthorized.calls.meter";
       "unauthorized.calls.meter";
   private static final String UNAUTHENTICATED_CALLS_METER = METRICS_PREFIX +
   private static final String UNAUTHENTICATED_CALLS_METER = METRICS_PREFIX +
       "unauthenticated.calls.meter";
       "unauthenticated.calls.meter";
+  private static final String GENERATE_EEK_METER = METRICS_PREFIX +
+      "generate_eek.calls.meter";
+  private static final String DECRYPT_EEK_METER = METRICS_PREFIX +
+      "decrypt_eek.calls.meter";
 
 
   private static Logger LOG;
   private static Logger LOG;
   private static MetricRegistry metricRegistry;
   private static MetricRegistry metricRegistry;
@@ -65,8 +73,10 @@ public class KMSWebApp implements ServletContextListener {
   private static Meter keyCallsMeter;
   private static Meter keyCallsMeter;
   private static Meter unauthorizedCallsMeter;
   private static Meter unauthorizedCallsMeter;
   private static Meter unauthenticatedCallsMeter;
   private static Meter unauthenticatedCallsMeter;
+  private static Meter decryptEEKCallsMeter;
+  private static Meter generateEEKCallsMeter;
   private static Meter invalidCallsMeter;
   private static Meter invalidCallsMeter;
-  private static KeyProvider keyProvider;
+  private static KeyProviderCryptoExtension keyProviderCryptoExtension;
 
 
   static {
   static {
     SLF4JBridgeHandler.removeHandlersForRootLogger();
     SLF4JBridgeHandler.removeHandlersForRootLogger();
@@ -121,6 +131,10 @@ public class KMSWebApp implements ServletContextListener {
       metricRegistry = new MetricRegistry();
       metricRegistry = new MetricRegistry();
       jmxReporter = JmxReporter.forRegistry(metricRegistry).build();
       jmxReporter = JmxReporter.forRegistry(metricRegistry).build();
       jmxReporter.start();
       jmxReporter.start();
+      generateEEKCallsMeter = metricRegistry.register(GENERATE_EEK_METER,
+          new Meter());
+      decryptEEKCallsMeter = metricRegistry.register(DECRYPT_EEK_METER,
+          new Meter());
       adminCallsMeter = metricRegistry.register(ADMIN_CALLS_METER, new Meter());
       adminCallsMeter = metricRegistry.register(ADMIN_CALLS_METER, new Meter());
       keyCallsMeter = metricRegistry.register(KEY_CALLS_METER, new Meter());
       keyCallsMeter = metricRegistry.register(KEY_CALLS_METER, new Meter());
       invalidCallsMeter = metricRegistry.register(INVALID_CALLS_METER,
       invalidCallsMeter = metricRegistry.register(INVALID_CALLS_METER,
@@ -149,11 +163,23 @@ public class KMSWebApp implements ServletContextListener {
             "the first provider",
             "the first provider",
             kmsConf.get(KeyProviderFactory.KEY_PROVIDER_PATH));
             kmsConf.get(KeyProviderFactory.KEY_PROVIDER_PATH));
       }
       }
-      keyProvider = providers.get(0);
-      long timeOutMillis =
-          kmsConf.getLong(KMSConfiguration.KEY_CACHE_TIMEOUT_KEY,
-              KMSConfiguration.KEY_CACHE_TIMEOUT_DEFAULT);
-      keyProvider = new KMSCacheKeyProvider(keyProvider, timeOutMillis);
+      KeyProvider keyProvider = providers.get(0);
+      if (kmsConf.getBoolean(KMSConfiguration.KEY_CACHE_ENABLE,
+          KMSConfiguration.KEY_CACHE_ENABLE_DEFAULT)) {
+        long keyTimeOutMillis =
+            kmsConf.getLong(KMSConfiguration.KEY_CACHE_TIMEOUT_KEY,
+                KMSConfiguration.KEY_CACHE_TIMEOUT_DEFAULT);
+        long currKeyTimeOutMillis =
+            kmsConf.getLong(KMSConfiguration.CURR_KEY_CACHE_TIMEOUT_KEY,
+                KMSConfiguration.CURR_KEY_CACHE_TIMEOUT_DEFAULT);
+        keyProvider = new CachingKeyProvider(keyProvider, keyTimeOutMillis,
+            currKeyTimeOutMillis);
+      }
+      keyProviderCryptoExtension = KeyProviderCryptoExtension.
+          createKeyProviderCryptoExtension(keyProvider);
+      keyProviderCryptoExtension = 
+          new EagerKeyGeneratorKeyProviderCryptoExtension(kmsConf, 
+              keyProviderCryptoExtension);
 
 
       LOG.info("KMS Started");
       LOG.info("KMS Started");
     } catch (Throwable ex) {
     } catch (Throwable ex) {
@@ -200,6 +226,14 @@ public class KMSWebApp implements ServletContextListener {
     return invalidCallsMeter;
     return invalidCallsMeter;
   }
   }
 
 
+  public static Meter getGenerateEEKCallsMeter() {
+    return generateEEKCallsMeter;
+  }
+
+  public static Meter getDecryptEEKCallsMeter() {
+    return decryptEEKCallsMeter;
+  }
+
   public static Meter getUnauthorizedCallsMeter() {
   public static Meter getUnauthorizedCallsMeter() {
     return unauthorizedCallsMeter;
     return unauthorizedCallsMeter;
   }
   }
@@ -208,7 +242,7 @@ public class KMSWebApp implements ServletContextListener {
     return unauthenticatedCallsMeter;
     return unauthenticatedCallsMeter;
   }
   }
 
 
-  public static KeyProvider getKeyProvider() {
-    return keyProvider;
+  public static KeyProviderCryptoExtension getKeyProvider() {
+    return keyProviderCryptoExtension;
   }
   }
 }
 }

+ 103 - 7
hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm

@@ -72,22 +72,35 @@ Hadoop Key Management Server (KMS) - Documentation Sets ${project.version}
   KMS caches keys for short period of time to avoid excessive hits to the
   KMS caches keys for short period of time to avoid excessive hits to the
   underlying key provider.
   underlying key provider.
 
 
-  The cache is used with the following 2 methods only, <<<getCurrentKey()>>>
-  and <<<getKeyVersion()>>>.
+  The cache is enabled by default (it can be disabled by setting the
+  <<<hadoop.kms.cache.enable>>> boolean property to false).
+
+  The cache is used with the following 3 methods only: <<<getCurrentKey()>>>,
+  <<<getKeyVersion()>>> and <<<getMetadata()>>>.
 
 
   For the <<<getCurrentKey()>>> method, cached entries are kept for a maximum
   For the <<<getCurrentKey()>>> method, cached entries are kept for a maximum
-  of 1000 millisecond regardless the number of times the key is being access
+  of 30000 millisecond regardless the number of times the key is being access
   (to avoid stale keys to be considered current).
   (to avoid stale keys to be considered current).
 
 
   For the <<<getKeyVersion()>>> method, cached entries are kept with a default
   For the <<<getKeyVersion()>>> method, cached entries are kept with a default
-  inactivity timeout of 10000 milliseconds. This time out is configurable via
-  the following property in the <<<etc/hadoop/kms-site.xml>>> configuration
-  file:
+  inactivity timeout of 600000 milliseconds (10 minutes). This timeout is
+  configurable via the following properties in the <<<etc/hadoop/kms-site.xml>>>
+  configuration file:
 
 
 +---+
 +---+
+  <property>
+    <name>hadoop.kms.cache.enable</name>
+    <value>true</value>
+  </property>
+
   <property>
   <property>
     <name>hadoop.kms.cache.timeout.ms</name>
     <name>hadoop.kms.cache.timeout.ms</name>
-    <value>10000</value>
+    <value>600000</value>
+  </property>
+
+  <property>
+    <name>hadoop.kms.current.key.cache.timeout.ms</name>
+    <value>30000</value>
   </property>
   </property>
 +---+
 +---+
 
 
@@ -266,6 +279,25 @@ $ keytool -genkey -alias tomcat -keyalg RSA
         to provide the key material when creating or rolling a key.
         to provide the key material when creating or rolling a key.
     </description>
     </description>
   </property>
   </property>
+
+  <property>
+    <name>hadoop.kms.acl.GENERATE_EEK</name>
+    <value>*</value>
+    <description>
+      ACL for generateEncryptedKey
+      CryptoExtension operations
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.DECRYPT_EEK</name>
+    <value>*</value>
+    <description>
+      ACL for decryptEncryptedKey
+      CryptoExtension operations
+    </description>
+  </property>
+</configuration>
 +---+
 +---+
 
 
 ** KMS HTTP REST API
 ** KMS HTTP REST API
@@ -383,6 +415,70 @@ Content-Type: application/json
 }
 }
 +---+
 +---+
 
 
+
+*** Generate Encrypted Key for Current KeyVersion
+
+  <REQUEST:>
+
++---+
+GET http://HOST:PORT/kms/v1/key/<key-name>/_eek?eek_op=generate&num_keys=<number-of-keys-to-generate>
++---+
+
+  <RESPONSE:>
+
++---+
+200 OK
+Content-Type: application/json
+[
+  {
+    "versionName"         : "encryptionVersionName",
+    "iv"                  : "<iv>",          //base64
+    "encryptedKeyVersion" : {
+        "versionName"       : "EEK",
+        "material"          : "<material>",    //base64
+    }
+  },
+  {
+    "versionName"         : "encryptionVersionName",
+    "iv"                  : "<iv>",          //base64
+    "encryptedKeyVersion" : {
+        "versionName"       : "EEK",
+        "material"          : "<material>",    //base64
+    }
+  },
+  ...
+]
++---+
+
+*** Decrypt Encrypted Key
+
+  <REQUEST:>
+
++---+
+POST http://HOST:PORT/kms/v1/keyversion/<version-name>/_eek?eek_op=decrypt
+Content-Type: application/json
+
+{
+  "name"        : "<key-name>",
+  "iv"          : "<iv>",          //base64
+  "material"    : "<material>",    //base64
+}
+
++---+
+
+  <RESPONSE:>
+
++---+
+200 OK
+Content-Type: application/json
+
+{
+  "name"        : "EK",
+  "material"    : "<material>",    //base64
+}
++---+
+
+
 *** Get Key Version
 *** Get Key Version
 
 
   <REQUEST:>
   <REQUEST:>

+ 100 - 11
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java

@@ -19,6 +19,9 @@ package org.apache.hadoop.crypto.key.kms.server;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.AuthorizationException;
@@ -36,6 +39,7 @@ import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.LoginContext;
 import javax.security.auth.login.LoginContext;
+
 import java.io.File;
 import java.io.File;
 import java.io.FileWriter;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.io.IOException;
@@ -267,7 +271,7 @@ public class TestKMS {
     }
     }
   }
   }
 
 
-  private void doAs(String user, final PrivilegedExceptionAction<Void> action)
+  private <T> T doAs(String user, final PrivilegedExceptionAction<T> action)
       throws Exception {
       throws Exception {
     Set<Principal> principals = new HashSet<Principal>();
     Set<Principal> principals = new HashSet<Principal>();
     principals.add(new KerberosPrincipal(user));
     principals.add(new KerberosPrincipal(user));
@@ -280,7 +284,7 @@ public class TestKMS {
     try {
     try {
       loginContext.login();
       loginContext.login();
       subject = loginContext.getSubject();
       subject = loginContext.getSubject();
-      Subject.doAs(subject, action);
+      return Subject.doAs(subject, action);
     } finally {
     } finally {
       loginContext.logout();
       loginContext.logout();
     }
     }
@@ -474,6 +478,32 @@ public class TestKMS {
         Assert.assertNotNull(kms1[0].getCreated());
         Assert.assertNotNull(kms1[0].getCreated());
         Assert.assertTrue(started.before(kms1[0].getCreated()));
         Assert.assertTrue(started.before(kms1[0].getCreated()));
 
 
+        // test generate and decryption of EEK
+        KeyProvider.KeyVersion kv = kp.getCurrentKey("k1");
+        KeyProviderCryptoExtension kpExt =
+            KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
+
+        EncryptedKeyVersion ek1 = kpExt.generateEncryptedKey(kv.getName());
+        Assert.assertEquals(KeyProviderCryptoExtension.EEK,
+            ek1.getEncryptedKeyVersion().getVersionName());
+        Assert.assertNotNull(ek1.getEncryptedKeyVersion().getMaterial());
+        Assert.assertEquals(kv.getMaterial().length,
+            ek1.getEncryptedKeyVersion().getMaterial().length);
+        KeyProvider.KeyVersion k1 = kpExt.decryptEncryptedKey(ek1);
+        Assert.assertEquals(KeyProviderCryptoExtension.EK, k1.getVersionName());
+        KeyProvider.KeyVersion k1a = kpExt.decryptEncryptedKey(ek1);
+        Assert.assertArrayEquals(k1.getMaterial(), k1a.getMaterial());
+        Assert.assertEquals(kv.getMaterial().length, k1.getMaterial().length);
+
+        EncryptedKeyVersion ek2 = kpExt.generateEncryptedKey(kv.getName());
+        KeyProvider.KeyVersion k2 = kpExt.decryptEncryptedKey(ek2);
+        boolean isEq = true;
+        for (int i = 0; isEq && i < ek2.getEncryptedKeyVersion()
+            .getMaterial().length; i++) {
+          isEq = k2.getMaterial()[i] == k1.getMaterial()[i];
+        }
+        Assert.assertFalse(isEq);
+
         // deleteKey()
         // deleteKey()
         kp.deleteKey("k1");
         kp.deleteKey("k1");
 
 
@@ -565,7 +595,7 @@ public class TestKMS {
       @Override
       @Override
       public Void call() throws Exception {
       public Void call() throws Exception {
         final Configuration conf = new Configuration();
         final Configuration conf = new Configuration();
-        conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
+        conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
         URI uri = createKMSUri(getKMSUrl());
         URI uri = createKMSUri(getKMSUrl());
         final KeyProvider kp = new KMSClientProvider(uri, conf);
         final KeyProvider kp = new KMSClientProvider(uri, conf);
 
 
@@ -582,7 +612,7 @@ public class TestKMS {
               Assert.fail(ex.toString());
               Assert.fail(ex.toString());
             }
             }
             try {
             try {
-              kp.createKey("k", new byte[8], new KeyProvider.Options(conf));
+              kp.createKey("k", new byte[16], new KeyProvider.Options(conf));
               Assert.fail();
               Assert.fail();
             } catch (AuthorizationException ex) {
             } catch (AuthorizationException ex) {
               //NOP
               //NOP
@@ -598,7 +628,7 @@ public class TestKMS {
               Assert.fail(ex.toString());
               Assert.fail(ex.toString());
             }
             }
             try {
             try {
-              kp.rollNewVersion("k", new byte[8]);
+              kp.rollNewVersion("k", new byte[16]);
               Assert.fail();
               Assert.fail();
             } catch (AuthorizationException ex) {
             } catch (AuthorizationException ex) {
               //NOP
               //NOP
@@ -690,7 +720,7 @@ public class TestKMS {
           @Override
           @Override
           public Void run() throws Exception {
           public Void run() throws Exception {
             try {
             try {
-              KeyProvider.KeyVersion kv = kp.createKey("k1", new byte[8],
+              KeyProvider.KeyVersion kv = kp.createKey("k1", new byte[16],
                   new KeyProvider.Options(conf));
                   new KeyProvider.Options(conf));
               Assert.assertNull(kv.getMaterial());
               Assert.assertNull(kv.getMaterial());
             } catch (Exception ex) {
             } catch (Exception ex) {
@@ -717,7 +747,8 @@ public class TestKMS {
           @Override
           @Override
           public Void run() throws Exception {
           public Void run() throws Exception {
             try {
             try {
-              KeyProvider.KeyVersion kv = kp.rollNewVersion("k1", new byte[8]);
+              KeyProvider.KeyVersion kv =
+                  kp.rollNewVersion("k1", new byte[16]);
               Assert.assertNull(kv.getMaterial());
               Assert.assertNull(kv.getMaterial());
             } catch (Exception ex) {
             } catch (Exception ex) {
               Assert.fail(ex.toString());
               Assert.fail(ex.toString());
@@ -726,12 +757,46 @@ public class TestKMS {
           }
           }
         });
         });
 
 
-        doAs("GET", new PrivilegedExceptionAction<Void>() {
+        final KeyVersion currKv =
+            doAs("GET", new PrivilegedExceptionAction<KeyVersion>() {
           @Override
           @Override
-          public Void run() throws Exception {
+          public KeyVersion run() throws Exception {
             try {
             try {
               kp.getKeyVersion("k1@0");
               kp.getKeyVersion("k1@0");
-              kp.getCurrentKey("k1");
+              KeyVersion kv = kp.getCurrentKey("k1");
+              return kv;
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            return null;
+          }
+        });
+
+        final EncryptedKeyVersion encKv =
+            doAs("GENERATE_EEK",
+                new PrivilegedExceptionAction<EncryptedKeyVersion>() {
+          @Override
+          public EncryptedKeyVersion run() throws Exception {
+            try {
+              KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
+                      createKeyProviderCryptoExtension(kp);
+              EncryptedKeyVersion ek1 =
+                  kpCE.generateEncryptedKey(currKv.getName());
+              return ek1;
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            return null;
+          }
+        });
+
+        doAs("DECRYPT_EEK", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
+                      createKeyProviderCryptoExtension(kp);
+              kpCE.decryptEncryptedKey(encKv);
             } catch (Exception ex) {
             } catch (Exception ex) {
               Assert.fail(ex.toString());
               Assert.fail(ex.toString());
             }
             }
@@ -817,7 +882,7 @@ public class TestKMS {
       @Override
       @Override
       public Void call() throws Exception {
       public Void call() throws Exception {
         final Configuration conf = new Configuration();
         final Configuration conf = new Configuration();
-        conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
+        conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
         URI uri = createKMSUri(getKMSUrl());
         URI uri = createKMSUri(getKMSUrl());
         final KeyProvider kp = new KMSClientProvider(uri, conf);
         final KeyProvider kp = new KMSClientProvider(uri, conf);
 
 
@@ -889,6 +954,30 @@ public class TestKMS {
       Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
       Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
     }
     }
 
 
+    caughtTimeout = false;
+    try {
+      KeyProvider kp = new KMSClientProvider(uri, conf);
+      KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp)
+          .generateEncryptedKey("a");
+    } catch (SocketTimeoutException e) {
+      caughtTimeout = true;
+    } catch (IOException e) {
+      Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
+    }
+
+    caughtTimeout = false;
+    try {
+      KeyProvider kp = new KMSClientProvider(uri, conf);
+      KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp)
+          .decryptEncryptedKey(
+              new KMSClientProvider.KMSEncryptedKeyVersion("a",
+                  "a", new byte[] {1, 2}, "EEK", new byte[] {1, 2}));
+    } catch (SocketTimeoutException e) {
+      caughtTimeout = true;
+    } catch (IOException e) {
+      Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
+    }
+
     Assert.assertTrue(caughtTimeout);
     Assert.assertTrue(caughtTimeout);
 
 
     sock.close();
     sock.close();

+ 19 - 6
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java

@@ -71,7 +71,16 @@ public class NfsExports {
   
   
   private static final Pattern CIDR_FORMAT_LONG = 
   private static final Pattern CIDR_FORMAT_LONG = 
       Pattern.compile(SLASH_FORMAT_LONG);
       Pattern.compile(SLASH_FORMAT_LONG);
-  
+
+  // Hostnames are composed of series of 'labels' concatenated with dots.
+  // Labels can be between 1-63 characters long, and can only take
+  // letters, digits & hyphens. They cannot start and end with hyphens. For
+  // more details, refer RFC-1123 & http://en.wikipedia.org/wiki/Hostname
+  private static final String LABEL_FORMAT =
+      "[a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?";
+  private static final Pattern HOSTNAME_FORMAT =
+      Pattern.compile("^(" + LABEL_FORMAT + "\\.)*" + LABEL_FORMAT + "$");
+
   static class AccessCacheEntry implements LightWeightCache.Entry{
   static class AccessCacheEntry implements LightWeightCache.Entry{
     private final String hostAddr;
     private final String hostAddr;
     private AccessPrivilege access;
     private AccessPrivilege access;
@@ -381,10 +390,14 @@ public class NfsExports {
         LOG.debug("Using Regex match for '" + host + "' and " + privilege);
         LOG.debug("Using Regex match for '" + host + "' and " + privilege);
       }
       }
       return new RegexMatch(privilege, host);
       return new RegexMatch(privilege, host);
+    } else if (HOSTNAME_FORMAT.matcher(host).matches()) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Using exact match for '" + host + "' and " + privilege);
+      }
+      return new ExactMatch(privilege, host);
+    } else {
+      throw new IllegalArgumentException("Invalid hostname provided '" + host
+          + "'");
     }
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Using exact match for '" + host + "' and " + privilege);
-    }
-    return new ExactMatch(privilege, host);
   }
   }
-}
+}

+ 12 - 0
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java

@@ -194,4 +194,16 @@ public class TestNfsExports {
     } while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000);
     } while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000);
     Assert.assertEquals(AccessPrivilege.NONE, ap);
     Assert.assertEquals(AccessPrivilege.NONE, ap);
   }
   }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testInvalidHost() {
+      NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
+        "foo#bar");
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testInvalidSeparator() {
+      NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
+        "foo ro : bar rw");
+  }
 }
 }

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java

@@ -154,6 +154,8 @@ public class Nfs3Utils {
     if (isSet(mode, Nfs3Constant.ACCESS_MODE_EXECUTE)) {
       if (type == NfsFileType.NFSREG.toValue()) {
         rtn |= Nfs3Constant.ACCESS3_EXECUTE;
+      } else {
+        rtn |= Nfs3Constant.ACCESS3_LOOKUP;
       }
     }
     return rtn;
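
The Nfs3Utils change above maps the execute permission bit to EXECUTE for regular files and to LOOKUP for everything else (directories), so a directory with mode 711 can still be traversed. A minimal sketch of that mapping, assuming the RFC 1813 access-bit values; the constants below are re-declared for illustration and are not the Hadoop Nfs3Constant fields:

public class AccessBitsSketch {
  // Assumed RFC 1813 ACCESS3 bit values, re-declared here for the sketch.
  static final int ACCESS3_LOOKUP = 0x0002;
  static final int ACCESS3_EXECUTE = 0x0020;

  static int mapExecuteBit(boolean executeSet, boolean isRegularFile) {
    int rtn = 0;
    if (executeSet) {
      if (isRegularFile) {
        rtn |= ACCESS3_EXECUTE;       // regular file: execute permission
      } else {
        rtn |= ACCESS3_LOOKUP;        // directory: execute bit grants lookup
      }
    }
    return rtn;
  }

  public static void main(String[] args) {
    System.out.println(mapExecuteBit(true, false)); // prints 2 (lookup)
  }
}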

+ 13 - 7
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java

@@ -1051,8 +1051,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   
   @Override
   public REMOVE3Response remove(XDR xdr, RpcInfo info) {
+    return remove(xdr, getSecurityHandler(info), info.remoteAddress());
+  }
+ 
+  @VisibleForTesting
+  REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
     REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK);
-    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1083,17 +1087,19 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
+      WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
+          preOpDirAttr);
+      if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
+        return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
+      }
+
       String fileIdPath = dirFileIdPath + "/" + fileName;
       HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath);
       if (fstat == null) {
-        WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
-            preOpDirAttr);
-        return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, dirWcc);
+        return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
       }
       if (fstat.isDir()) {
-        WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
-            preOpDirAttr);
-        return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, dirWcc);
+        return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, errWcc);
       }
 
       boolean result = dfsClient.delete(fileIdPath, false);

+ 120 - 0
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestClientAccessPrivilege.java

@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.nfs.nfs3;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.nfs.nfs3.FileHandle;
+import org.apache.hadoop.nfs.nfs3.Nfs3Status;
+import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response;
+import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.SecurityHandler;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+public class TestClientAccessPrivilege {
+  static MiniDFSCluster cluster = null;
+  static NfsConfiguration config = new NfsConfiguration();
+  static DistributedFileSystem hdfs;
+  static NameNode nn;
+  static String testdir = "/tmp";
+  static SecurityHandler securityHandler;
+
+  @BeforeClass
+  public static void setup() throws Exception {
+
+    String currentUser = System.getProperty("user.name");
+    config.set(DefaultImpersonationProvider.getTestProvider()
+        .getProxySuperuserGroupConfKey(currentUser), "*");
+    config.set(DefaultImpersonationProvider.getTestProvider()
+        .getProxySuperuserIpConfKey(currentUser), "*");
+    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
+    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
+    cluster.waitActive();
+    hdfs = cluster.getFileSystem();
+    nn = cluster.getNameNode();
+
+    // Use ephemeral port in case tests are running in parallel
+    config.setInt("nfs3.mountd.port", 0);
+    config.setInt("nfs3.server.port", 0);
+
+    securityHandler = Mockito.mock(SecurityHandler.class);
+    Mockito.when(securityHandler.getUser()).thenReturn(
+        System.getProperty("user.name"));
+  }
+
+  @AfterClass
+  public static void shutdown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Before
+  public void createFiles() throws IllegalArgumentException, IOException {
+    hdfs.delete(new Path(testdir), true);
+    hdfs.mkdirs(new Path(testdir));
+    DFSTestUtil.createFile(hdfs, new Path(testdir + "/f1"), 0, (short) 1, 0);
+  }
+
+  @Test(timeout = 60000)
+  public void testClientAccessPrivilegeForRemove() throws Exception {
+    // Configure ro access for nfs1 service
+    config.set("dfs.nfs.exports.allowed.hosts", "* ro");
+
+    // Start nfs
+    Nfs3 nfs = new Nfs3(config);
+    nfs.startServiceInternal(false);
+
+    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
+
+    // Create a remove request
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("f1");
+
+    // Remove operation
+    REMOVE3Response response = nfsd.remove(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+
+    // Assert on return code
+    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
+        response.getStatus());
+
+  }
+
+}

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java

@@ -68,5 +68,12 @@ public class TestNfs3Utils {
       0, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 4}, attr));
     assertEquals("No access should be allowed for dir as mode is 700 even though AuxGID does match",
       0, Nfs3Utils.getAccessRightsForUserGroup(3, 20, new int[] {5, 10}, attr));
+    
+    Mockito.when(attr.getUid()).thenReturn(2);
+    Mockito.when(attr.getGid()).thenReturn(10);
+    Mockito.when(attr.getMode()).thenReturn(457); // 711
+    Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue());
+    assertEquals("Access should be allowed for dir as mode is 711 and GID matches",
+        2 /* Lookup */, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 11}, attr));
   }
 }
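
The mocked mode value 457 in the test above is simply octal 0711 written in decimal; a quick standalone check:

public class ModeSketch {
  public static void main(String[] args) {
    System.out.println(0711);                        // prints 457
    System.out.println(Integer.toOctalString(457));  // prints 711
  }
}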

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java

@@ -72,11 +72,11 @@ public class TestReaddir {
   public static void setup() throws Exception {
     String currentUser = System.getProperty("user.name");
     config.set(
-            DefaultImpersonationProvider.getProxySuperuserGroupConfKey(currentUser),
-            "*");
+            DefaultImpersonationProvider.getTestProvider().
+                getProxySuperuserGroupConfKey(currentUser), "*");
     config.set(
-            DefaultImpersonationProvider.getProxySuperuserIpConfKey(currentUser),
-            "*");
+            DefaultImpersonationProvider.getTestProvider().
+                getProxySuperuserIpConfKey(currentUser), "*");
     ProxyUsers.refreshSuperUserGroupsConfiguration(config);
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
     cluster.waitActive();

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java

@@ -312,10 +312,12 @@ public class TestWrites {
         System.getProperty("user.name"));
     String currentUser = System.getProperty("user.name");
     config.set(
-            DefaultImpersonationProvider.getProxySuperuserGroupConfKey(currentUser),
+            DefaultImpersonationProvider.getTestProvider().
+                getProxySuperuserGroupConfKey(currentUser),
             "*");
     config.set(
-            DefaultImpersonationProvider.getProxySuperuserIpConfKey(currentUser),
+            DefaultImpersonationProvider.getTestProvider().
+                getProxySuperuserIpConfKey(currentUser),
             "*");
     ProxyUsers.refreshSuperUserGroupsConfiguration(config);
 

+ 58 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -12,6 +12,8 @@ Trunk (Unreleased)
     HDFS-5570. Deprecate hftp / hsftp and replace them with webhdfs / swebhdfs.
     (wheat9)
 
+    HDFS-2538. option to disable fsck dots (Mohammad Kamrul Islam via aw)
+
   NEW FEATURES
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
@@ -287,8 +289,35 @@ Release 2.6.0 - UNRELEASED
     HDFS-2856. Fix block protocol so that Datanodes don't require root or jsvc.
     (cnauroth)
 
+    HDFS-5624. Add HDFS tests for ACLs in combination with viewfs.
+    (Stephen Chu via cnauroth)
+
+    HDFS-6655. Add 'header banner' to 'explorer.html' also in Namenode UI
+    (vinayakumarb)
+
+    HDFS-4120. Add a new "-skipSharedEditsCheck" option for BootstrapStandby
+    (Liang Xie and Rakesh R via vinayakumarb)
+
+    HDFS-6597. Add a new option to NN upgrade to terminate the process after
+    upgrade on NN is completed. (Danilo Vunjak via cnauroth)
+
+    HDFS-6700. BlockPlacementPolicy shoud choose storage but not datanode for
+    deletion. (szetszwo)
+
+    HDFS-6616. Add exclude-datanodes feature to WebHDFS redirection so that it
+    will not redirect retries to the same datanode. (zhaoyunjiong via szetszwo)
+
+    HDFS-6702. Change DFSClient to pass the StorageType from the namenode to
+    datanodes and change datanode to write block replicas using the specified
+    storage type. (szetszwo)
+
+    HDFS-6701. Make seed optional in NetworkTopology#sortByDistance.
+    (Ashwin Shankar via wang)
+
   OPTIMIZATIONS
 
+    HDFS-6690. Deduplicate xattr names in memory. (wang)
+
   BUG FIXES
 
     HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin
@@ -309,6 +338,25 @@ Release 2.6.0 - UNRELEASED
     HDFS-5809. BlockPoolSliceScanner and high speed hdfs appending make
     datanode to drop into infinite loop (cmccabe)
 
+    HDFS-6456. NFS should throw error for invalid entry in 
+    dfs.nfs.exports.allowed.hosts (Abhiraj Butala via brandonli)
+
+    HDFS-6689. NFS doesn't return correct lookup access for direcories (brandonli)
+
+    HDFS-6478. RemoteException can't be retried properly for non-HA scenario.
+    (Ming Ma via jing9)
+
+    HDFS-6693. TestDFSAdminWithHA fails on windows ( vinayakumarb )
+
+    HDFS-6667. In HDFS HA mode, Distcp/SLive with webhdfs on secure cluster fails
+    with Client cannot authenticate via:[TOKEN, KERBEROS] error. (jing9)
+
+    HDFS-6704. Fix the command to launch JournalNode in HDFS-HA document.
+    (Akira AJISAKA via jing9)
+
+    HDFS-6731. Run "hdfs zkfc-formatZK" on a server in a non-namenode will cause
+    a null pointer exception. (Masatake Iwasaki via brandonli)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -554,6 +602,11 @@ Release 2.5.0 - UNRELEASED
     HDFS-6493. Change dfs.namenode.startup.delay.block.deletion to second
     instead of millisecond. (Juan Yu via wang)
 
+    HDFS-6680. BlockPlacementPolicyDefault does not choose favored nodes
+    correctly.  (szetszwo) 
+
+    HDFS-6712. Document HDFS Multihoming Settings. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
@@ -563,6 +616,8 @@ Release 2.5.0 - UNRELEASED
 
     HDFS-6583. Remove clientNode in FileUnderConstructionFeature. (wheat9)
 
+    HDFS-6599. 2.4 addBlock is 10 to 20 times slower compared to 0.23 (daryn)
+
   BUG FIXES 
 
     HDFS-6112. NFS Gateway docs are incorrect for allowed hosts configuration.
@@ -834,6 +889,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6378. NFS registration should timeout instead of hanging when
     portmap/rpcbind is not available (Abhiraj Butala via brandonli)
 
+    HDFS-6703. NFS: Files can be deleted from a read-only mount
+    (Srikanth Upputuri via brandonli)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)

+ 169 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBootstrapStandbyWithBKJM.java

@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.contrib.bkjournal;
+
+import java.io.File;
+import java.io.FileFilter;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyCheckpoints.SlowCodec;
+import org.apache.hadoop.io.compress.CompressionCodecFactory;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableList;
+
+public class TestBootstrapStandbyWithBKJM {
+  private static BKJMUtil bkutil;
+  protected MiniDFSCluster cluster;
+
+  @BeforeClass
+  public static void setupBookkeeper() throws Exception {
+    bkutil = new BKJMUtil(3);
+    bkutil.start();
+  }
+
+  @AfterClass
+  public static void teardownBookkeeper() throws Exception {
+    bkutil.teardown();
+  }
+
+  @After
+  public void teardown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
+        .createJournalURI("/bootstrapStandby").toString());
+    BKJMUtil.addJournalManagerDefinition(conf);
+    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
+    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
+        SlowCodec.class.getCanonicalName());
+    CompressionCodecFactory.setCodecClasses(conf,
+        ImmutableList.<Class> of(SlowCodec.class));
+    MiniDFSNNTopology topology = new MiniDFSNNTopology()
+        .addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(
+            new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)).addNN(
+            new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
+        .numDataNodes(1).manageNameDfsSharedDirs(false).build();
+    cluster.waitActive();
+  }
+
+  /**
+   * While boostrapping, in_progress transaction entries should be skipped.
+   * Bootstrap usage for BKJM : "-force", "-nonInteractive", "-skipSharedEditsCheck"
+   */
+  @Test
+  public void testBootstrapStandbyWithActiveNN() throws Exception {
+    // make nn0 active
+    cluster.transitionToActive(0);
+   
+    // do ops and generate in-progress edit log data
+    Configuration confNN1 = cluster.getConfiguration(1);
+    DistributedFileSystem dfs = (DistributedFileSystem) HATestUtil
+        .configureFailoverFs(cluster, confNN1);
+    for (int i = 1; i <= 10; i++) {
+      dfs.mkdirs(new Path("/test" + i));
+    }
+    dfs.close();
+
+    // shutdown nn1 and delete its edit log files
+    cluster.shutdownNameNode(1);
+    deleteEditLogIfExists(confNN1);
+    cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER, true);
+    cluster.getNameNodeRpc(0).saveNamespace();
+    cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, true);
+
+    // check without -skipSharedEditsCheck, Bootstrap should fail for BKJM
+    // immediately after saveNamespace
+    int rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive" },
+      confNN1);
+    Assert.assertEquals("Mismatches return code", 6, rc);
+
+    // check with -skipSharedEditsCheck
+    rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive",
+        "-skipSharedEditsCheck" }, confNN1);
+    Assert.assertEquals("Mismatches return code", 0, rc);
+
+    // Checkpoint as fast as we can, in a tight loop.
+    confNN1.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
+    cluster.restartNameNode(1);
+    cluster.transitionToStandby(1);
+   
+    NameNode nn0 = cluster.getNameNode(0);
+    HATestUtil.waitForStandbyToCatchUp(nn0, cluster.getNameNode(1));
+    long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
+        .getFSImage().getMostRecentCheckpointTxId();
+    HATestUtil.waitForCheckpoint(cluster, 1,
+        ImmutableList.of((int) expectedCheckpointTxId));
+
+    // Should have copied over the namespace
+    FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
+        ImmutableList.of((int) expectedCheckpointTxId));
+    FSImageTestUtil.assertNNFilesMatch(cluster);
+  }
+
+  private void deleteEditLogIfExists(Configuration confNN1) {
+    String editDirs = confNN1.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
+    String[] listEditDirs = StringUtils.split(editDirs, ',');
+    Assert.assertTrue("Wrong edit directory path!", listEditDirs.length > 0);
+
+    for (String dir : listEditDirs) {
+      File curDir = new File(dir, "current");
+      File[] listFiles = curDir.listFiles(new FileFilter() {
+        @Override
+        public boolean accept(File f) {
+          if (!f.getName().startsWith("edits")) {
+            return true;
+          }
+          return false;
+        }
+      });
+      if (listFiles != null && listFiles.length > 0) {
+        for (File file : listFiles) {
+          Assert.assertTrue("Failed to delete edit files!", file.delete());
+        }
+      }
+    }
+  }
+}

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -214,6 +214,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version";
   public static final String  DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
 
+  public static final String DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK = "dfs.namenode.randomize-block-locations-per-block";
+  public static final boolean DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT = false;
+
   public static final String  DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
   public static final int     DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
 

+ 31 - 19
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -313,6 +313,7 @@ public class DFSOutputStream extends FSOutputSummer
     private DataInputStream blockReplyStream;
     private ResponseProcessor response = null;
     private volatile DatanodeInfo[] nodes = null; // list of targets for current block
+    private volatile StorageType[] storageTypes = null;
     private volatile String[] storageIDs = null;
     private final LoadingCache<DatanodeInfo, DatanodeInfo> excludedNodes =
         CacheBuilder.newBuilder()
@@ -417,10 +418,12 @@ public class DFSOutputStream extends FSOutputSummer
     }
     
     private void setPipeline(LocatedBlock lb) {
-      setPipeline(lb.getLocations(), lb.getStorageIDs());
+      setPipeline(lb.getLocations(), lb.getStorageTypes(), lb.getStorageIDs());
     }
-    private void setPipeline(DatanodeInfo[] nodes, String[] storageIDs) {
+    private void setPipeline(DatanodeInfo[] nodes, StorageType[] storageTypes,
+        String[] storageIDs) {
       this.nodes = nodes;
+      this.storageTypes = storageTypes;
       this.storageIDs = storageIDs;
     }
 
@@ -446,7 +449,7 @@ public class DFSOutputStream extends FSOutputSummer
       this.setName("DataStreamer for file " + src);
       closeResponder();
       closeStream();
-      setPipeline(null, null);
+      setPipeline(null, null, null);
       stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
     }
     
@@ -1031,10 +1034,12 @@ public class DFSOutputStream extends FSOutputSummer
       //transfer replica
       final DatanodeInfo src = d == 0? nodes[1]: nodes[d - 1];
       final DatanodeInfo[] targets = {nodes[d]};
-      transfer(src, targets, lb.getBlockToken());
+      final StorageType[] targetStorageTypes = {storageTypes[d]};
+      transfer(src, targets, targetStorageTypes, lb.getBlockToken());
     }
 
     private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets,
+        final StorageType[] targetStorageTypes,
         final Token<BlockTokenIdentifier> blockToken) throws IOException {
       //transfer replica to the new datanode
       Socket sock = null;
@@ -1056,7 +1061,7 @@ public class DFSOutputStream extends FSOutputSummer
 
         //send the TRANSFER_BLOCK request
         new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
-            targets);
+            targets, targetStorageTypes);
         out.flush();
 
         //ack
@@ -1135,16 +1140,15 @@ public class DFSOutputStream extends FSOutputSummer
           failed.add(nodes[errorIndex]);
 
           DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length-1];
-          System.arraycopy(nodes, 0, newnodes, 0, errorIndex);
-          System.arraycopy(nodes, errorIndex+1, newnodes, errorIndex,
-              newnodes.length-errorIndex);
+          arraycopy(nodes, newnodes, errorIndex);
+
+          final StorageType[] newStorageTypes = new StorageType[newnodes.length];
+          arraycopy(storageTypes, newStorageTypes, errorIndex);
 
           final String[] newStorageIDs = new String[newnodes.length];
-          System.arraycopy(storageIDs, 0, newStorageIDs, 0, errorIndex);
-          System.arraycopy(storageIDs, errorIndex+1, newStorageIDs, errorIndex,
-              newStorageIDs.length-errorIndex);
+          arraycopy(storageIDs, newStorageIDs, errorIndex);
           
-          setPipeline(newnodes, newStorageIDs);
+          setPipeline(newnodes, newStorageTypes, newStorageIDs);
 
           // Just took care of a node error while waiting for a node restart
           if (restartingNodeIndex >= 0) {
@@ -1181,7 +1185,7 @@ public class DFSOutputStream extends FSOutputSummer
         
         // set up the pipeline again with the remaining nodes
         if (failPacket) { // for testing
-          success = createBlockOutputStream(nodes, newGS, isRecovery);
+          success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
           failPacket = false;
           try {
             // Give DNs time to send in bad reports. In real situations,
@@ -1190,7 +1194,7 @@ public class DFSOutputStream extends FSOutputSummer
             Thread.sleep(2000);
           } catch (InterruptedException ie) {}
         } else {
-          success = createBlockOutputStream(nodes, newGS, isRecovery);
+          success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
         }
 
         if (restartingNodeIndex >= 0) {
@@ -1242,6 +1246,7 @@ public class DFSOutputStream extends FSOutputSummer
     private LocatedBlock nextBlockOutputStream() throws IOException {
       LocatedBlock lb = null;
       DatanodeInfo[] nodes = null;
+      StorageType[] storageTypes = null;
       int count = dfsClient.getConf().nBlockWriteRetry;
       boolean success = false;
       ExtendedBlock oldBlock = block;
@@ -1264,11 +1269,12 @@ public class DFSOutputStream extends FSOutputSummer
         bytesSent = 0;
         accessToken = lb.getBlockToken();
         nodes = lb.getLocations();
+        storageTypes = lb.getStorageTypes();
 
         //
         // Connect to first DataNode in the list.
         //
-        success = createBlockOutputStream(nodes, 0L, false);
+        success = createBlockOutputStream(nodes, storageTypes, 0L, false);
 
         if (!success) {
           DFSClient.LOG.info("Abandoning " + block);
@@ -1289,8 +1295,8 @@ public class DFSOutputStream extends FSOutputSummer
     // connects to the first datanode in the pipeline
     // Returns true if success, otherwise return failure.
     //
-    private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS,
-        boolean recoveryFlag) {
+    private boolean createBlockOutputStream(DatanodeInfo[] nodes,
+        StorageType[] nodeStorageTypes, long newGS, boolean recoveryFlag) {
       if (nodes.length == 0) {
         DFSClient.LOG.info("nodes are empty for write pipeline of block "
             + block);
@@ -1332,9 +1338,10 @@ public class DFSOutputStream extends FSOutputSummer
           // Xmit header info to datanode
           //
   
+          BlockConstructionStage bcs = recoveryFlag? stage.getRecoveryStage(): stage;
           // send the request
-          new Sender(out).writeBlock(block, accessToken, dfsClient.clientName,
-              nodes, null, recoveryFlag? stage.getRecoveryStage() : stage, 
+          new Sender(out).writeBlock(block, nodeStorageTypes[0], accessToken,
+              dfsClient.clientName, nodes, nodeStorageTypes, null, bcs, 
               nodes.length, block.getNumBytes(), bytesSent, newGS, checksum,
               cachingStrategy.get());
   
@@ -2197,4 +2204,9 @@ public class DFSOutputStream extends FSOutputSummer
   public long getFileId() {
     return fileId;
   }
+
+  private static <T> void arraycopy(T[] srcs, T[] dsts, int skipIndex) {
+    System.arraycopy(srcs, 0, dsts, 0, skipIndex);
+    System.arraycopy(srcs, skipIndex+1, dsts, skipIndex, dsts.length-skipIndex);
+  }
 }
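
The DFSOutputStream hunks above replace the repeated System.arraycopy pairs with a small generic helper that drops the entry at the failed pipeline index from nodes, storageTypes and storageIDs alike. A minimal standalone sketch of how that helper behaves; the class and variable names here are illustrative only:

import java.util.Arrays;

public class ArrayCopySketch {
  // Copies every element of srcs into dsts except the one at skipIndex;
  // dsts must have length srcs.length - 1.
  private static <T> void arraycopy(T[] srcs, T[] dsts, int skipIndex) {
    System.arraycopy(srcs, 0, dsts, 0, skipIndex);
    System.arraycopy(srcs, skipIndex + 1, dsts, skipIndex, dsts.length - skipIndex);
  }

  public static void main(String[] args) {
    String[] nodes = {"dn1", "dn2", "dn3"};
    String[] remaining = new String[nodes.length - 1];
    arraycopy(nodes, remaining, 1);                 // drop the failed node at index 1
    System.out.println(Arrays.toString(remaining)); // [dn1, dn3]
  }
}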

+ 21 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java

@@ -26,7 +26,6 @@ import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -38,14 +37,13 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -259,12 +257,11 @@ public class HAUtil {
   /**
    * Parse the file system URI out of the provided token.
    */
-  public static URI getServiceUriFromToken(final String scheme,
-                                           Token<?> token) {
+  public static URI getServiceUriFromToken(final String scheme, Token<?> token) {
     String tokStr = token.getService().toString();
-
-    if (tokStr.startsWith(HA_DT_SERVICE_PREFIX)) {
-      tokStr = tokStr.replaceFirst(HA_DT_SERVICE_PREFIX, "");
+    final String prefix = buildTokenServicePrefixForLogicalUri(scheme);
+    if (tokStr.startsWith(prefix)) {
+      tokStr = tokStr.replaceFirst(prefix, "");
     }
     return URI.create(scheme + "://" + tokStr);
   }
@@ -273,10 +270,13 @@ public class HAUtil {
    * Get the service name used in the delegation token for the given logical
    * HA service.
    * @param uri the logical URI of the cluster
+   * @param scheme the scheme of the corresponding FileSystem
    * @return the service name
    */
-  public static Text buildTokenServiceForLogicalUri(URI uri) {
-    return new Text(HA_DT_SERVICE_PREFIX + uri.getHost());
+  public static Text buildTokenServiceForLogicalUri(final URI uri,
+      final String scheme) {
+    return new Text(buildTokenServicePrefixForLogicalUri(scheme)
+        + uri.getHost());
   }
   
   /**
@@ -286,7 +286,11 @@ public class HAUtil {
   public static boolean isTokenForLogicalUri(Token<?> token) {
     return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
   }
-  
+
+  public static String buildTokenServicePrefixForLogicalUri(String scheme) {
+    return HA_DT_SERVICE_PREFIX + scheme + ":";
+  }
+
   /**
    * Locate a delegation token associated with the given HA cluster URI, and if
    * one is found, clone it to also represent the underlying namenode address.
@@ -298,7 +302,9 @@ public class HAUtil {
   public static void cloneDelegationTokenForLogicalUri(
       UserGroupInformation ugi, URI haUri,
       Collection<InetSocketAddress> nnAddrs) {
-    Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri);
+    // this cloning logic is only used by hdfs
+    Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
+        HdfsConstants.HDFS_URI_SCHEME);
     Token<DelegationTokenIdentifier> haToken =
         tokenSelector.selectToken(haService, ugi.getTokens());
     if (haToken != null) {
@@ -309,8 +315,9 @@ public class HAUtil {
         Token<DelegationTokenIdentifier> specificToken =
             new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
         SecurityUtil.setTokenService(specificToken, singleNNAddr);
-        Text alias =
-            new Text(HA_DT_SERVICE_PREFIX + "//" + specificToken.getService());
+        Text alias = new Text(
+            buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
+                + "//" + specificToken.getService());
         ugi.addToken(alias, specificToken);
         LOG.debug("Mapped HA service delegation token for logical URI " +
             haUri + " to namenode " + singleNNAddr);
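
The HAUtil changes above stop hard-coding the "ha-hdfs:" token-service prefix and instead derive it from the FileSystem scheme, so logical HA URIs for schemes other than hdfs can carry their own delegation tokens. A minimal sketch of the resulting service-name construction; the constant value and the two methods mirror the patch, while the wrapper class and main method are illustrative only:

import java.net.URI;

public class TokenServiceSketch {
  // Mirrors the new HdfsConstants.HA_DT_SERVICE_PREFIX value from this commit.
  static final String HA_DT_SERVICE_PREFIX = "ha-";

  static String buildTokenServicePrefixForLogicalUri(String scheme) {
    return HA_DT_SERVICE_PREFIX + scheme + ":";
  }

  static String buildTokenServiceForLogicalUri(URI uri, String scheme) {
    return buildTokenServicePrefixForLogicalUri(scheme) + uri.getHost();
  }

  public static void main(String[] args) {
    URI logical = URI.create("hdfs://mycluster");
    // Prints "ha-hdfs:mycluster", matching the old hard-coded "ha-hdfs:" form
    // while also allowing other schemes to build their own prefixes.
    System.out.println(buildTokenServiceForLogicalUri(logical, "hdfs"));
  }
}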

+ 28 - 29
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java

@@ -163,7 +163,8 @@ public class NameNodeProxies {
 
       Text dtService;
       if (failoverProxyProvider.useLogicalURI()) {
-        dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
+        dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
+            HdfsConstants.HDFS_URI_SCHEME);
       } else {
         dtService = SecurityUtil.buildTokenService(
             NameNode.getAddress(nameNodeUri));
@@ -224,7 +225,8 @@ public class NameNodeProxies {
           new Class[] { xface }, dummyHandler);
       Text dtService;
       if (failoverProxyProvider.useLogicalURI()) {
-        dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
+        dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
+            HdfsConstants.HDFS_URI_SCHEME);
       } else {
         dtService = SecurityUtil.buildTokenService(
             NameNode.getAddress(nameNodeUri));
@@ -333,19 +335,18 @@ public class NameNodeProxies {
         address, conf, ugi, NamenodeProtocolPB.class, 0);
     if (withRetries) { // create the proxy with retries
       RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
-          TimeUnit.MILLISECONDS);
-      Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap 
-                     = new HashMap<Class<? extends Exception>, RetryPolicy>();
-      RetryPolicy methodPolicy = RetryPolicies.retryByException(timeoutPolicy,
-          exceptionToPolicyMap);
-      Map<String, RetryPolicy> methodNameToPolicyMap 
-                     = new HashMap<String, RetryPolicy>();
-      methodNameToPolicyMap.put("getBlocks", methodPolicy);
-      methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
-      proxy = (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
-          proxy, methodNameToPolicyMap);
+              TimeUnit.MILLISECONDS);
+      Map<String, RetryPolicy> methodNameToPolicyMap
+           = new HashMap<String, RetryPolicy>();
+      methodNameToPolicyMap.put("getBlocks", timeoutPolicy);
+      methodNameToPolicyMap.put("getAccessKeys", timeoutPolicy);
+      NamenodeProtocol translatorProxy =
+          new NamenodeProtocolTranslatorPB(proxy);
+      return (NamenodeProtocol) RetryProxy.create(
+          NamenodeProtocol.class, translatorProxy, methodNameToPolicyMap);
+    } else {
+      return new NamenodeProtocolTranslatorPB(proxy);
     }
-    return new NamenodeProtocolTranslatorPB(proxy);
   }
   
   private static ClientProtocol createNNProxyWithClientProtocol(
@@ -379,29 +380,27 @@ public class NameNodeProxies {
                  = new HashMap<Class<? extends Exception>, RetryPolicy>();
       remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
           createPolicy);
-    
-      Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
-                 = new HashMap<Class<? extends Exception>, RetryPolicy>();
-      exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
-          .retryByRemoteException(defaultPolicy,
-              remoteExceptionToPolicyMap));
-      RetryPolicy methodPolicy = RetryPolicies.retryByException(
-          defaultPolicy, exceptionToPolicyMap);
+
+      RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
+          defaultPolicy, remoteExceptionToPolicyMap);
       Map<String, RetryPolicy> methodNameToPolicyMap 
                  = new HashMap<String, RetryPolicy>();
     
       methodNameToPolicyMap.put("create", methodPolicy);
-    
-      proxy = (ClientNamenodeProtocolPB) RetryProxy.create(
-          ClientNamenodeProtocolPB.class,
-          new DefaultFailoverProxyProvider<ClientNamenodeProtocolPB>(
-              ClientNamenodeProtocolPB.class, proxy),
+
+      ClientProtocol translatorProxy =
+        new ClientNamenodeProtocolTranslatorPB(proxy);
+      return (ClientProtocol) RetryProxy.create(
+          ClientProtocol.class,
+          new DefaultFailoverProxyProvider<ClientProtocol>(
+              ClientProtocol.class, translatorProxy),
           methodNameToPolicyMap,
           defaultPolicy);
+    } else {
+      return new ClientNamenodeProtocolTranslatorPB(proxy);
     }
-    return new ClientNamenodeProtocolTranslatorPB(proxy);
   }
-  
+
   private static Object createNameNodeProxy(InetSocketAddress address,
       Configuration conf, UserGroupInformation ugi, Class<?> xface,
       int rpcTimeout) throws IOException {
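
The NameNodeProxies hunks above now wrap the protocol translator, rather than the raw protobuf proxy, in the retry layer, keyed by method name. A rough sketch of the same RetryProxy pattern against a toy interface, assuming hadoop-common's org.apache.hadoop.io.retry classes are on the classpath; the BlockLister interface and its implementation are invented purely for illustration:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

public class RetryProxySketch {
  interface BlockLister {
    String getBlocks();
  }

  public static void main(String[] args) {
    // Same backoff policy as in the hunk: up to 5 retries, 200 ms base sleep.
    RetryPolicy timeoutPolicy =
        RetryPolicies.exponentialBackoffRetry(5, 200, TimeUnit.MILLISECONDS);
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
    methodNameToPolicyMap.put("getBlocks", timeoutPolicy);

    BlockLister direct = new BlockLister() {
      public String getBlocks() { return "blocks"; }
    };
    // Wrap the translator-style object so getBlocks() calls are retried per policy.
    BlockLister retrying = (BlockLister) RetryProxy.create(
        BlockLister.class, direct, methodNameToPolicyMap);
    System.out.println(retrying.getBlocks());
  }
}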

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -339,7 +339,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     buffer.append("Cache Remaining: " +cr+ " ("+StringUtils.byteDesc(cr)+")"+"\n");
     buffer.append("Cache Used%: "+percent2String(cacheUsedPercent) + "\n");
     buffer.append("Cache Remaining%: "+percent2String(cacheRemainingPercent) + "\n");
-
+    buffer.append("Xceivers: "+getXceiverCount()+"\n");
     buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
     return buffer.toString();
   }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java

@@ -124,7 +124,7 @@ public class HdfsConstants {
    * of a delgation token, indicating that the URI is a logical (HA)
    * URI.
    */
-  public static final String HA_DT_SERVICE_PREFIX = "ha-hdfs:";
+  public static final String HA_DT_SERVICE_PREFIX = "ha-";
 
 
   /**

+ 18 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java

@@ -23,6 +23,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -71,11 +72,20 @@ public interface DataTransferProtocol {
 
   /**
    * Write a block to a datanode pipeline.
-   * 
+   * The receiver datanode of this call is the next datanode in the pipeline.
+   * The other downstream datanodes are specified by the targets parameter.
+   * Note that the receiver {@link DatanodeInfo} is not required in the
+   * parameter list since the receiver datanode knows its info.  However, the
+   * {@link StorageType} for storing the replica in the receiver datanode is a 
+   * parameter since the receiver datanode may support multiple storage types.
+   *
    * @param blk the block being written.
+   * @param storageType for storing the replica in the receiver datanode.
    * @param blockToken security token for accessing the block.
    * @param clientName client's name.
-   * @param targets target datanodes in the pipeline.
+   * @param targets other downstream datanodes in the pipeline.
+   * @param targetStorageTypes target {@link StorageType}s corresponding
+   *                           to the target datanodes.
    * @param source source datanode.
    * @param stage pipeline stage.
   * @param pipelineSize the size of the pipeline.
@@ -84,9 +94,11 @@ public interface DataTransferProtocol {
    * @param latestGenerationStamp the latest generation stamp of the block.
    */
   public void writeBlock(final ExtendedBlock blk,
+      final StorageType storageType, 
       final Token<BlockTokenIdentifier> blockToken,
       final String clientName,
       final DatanodeInfo[] targets,
+      final StorageType[] targetStorageTypes, 
       final DatanodeInfo source,
       final BlockConstructionStage stage,
       final int pipelineSize,
@@ -110,7 +122,8 @@ public interface DataTransferProtocol {
   public void transferBlock(final ExtendedBlock blk,
       final Token<BlockTokenIdentifier> blockToken,
       final String clientName,
-      final DatanodeInfo[] targets) throws IOException;
+      final DatanodeInfo[] targets,
+      final StorageType[] targetStorageTypes) throws IOException;
 
   /**
    * Request short circuit access file descriptors from a DataNode.
@@ -148,11 +161,13 @@ public interface DataTransferProtocol {
    * It is used for balancing purpose.
    * 
    * @param blk the block being replaced.
+   * @param storageType the {@link StorageType} for storing the block.
    * @param blockToken security token for accessing the block.
    * @param delHint the hint for deleting the block in the original datanode.
    * @param source the source datanode for receiving the block.
    */
   public void replaceBlock(final ExtendedBlock blk,
+      final StorageType storageType, 
       final Token<BlockTokenIdentifier> blockToken,
       final String delHint,
       final DatanodeInfo source) throws IOException;

+ 9 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java

@@ -25,6 +25,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
@@ -121,10 +122,13 @@ public abstract class Receiver implements DataTransferProtocol {
   /** Receive OP_WRITE_BLOCK */
   private void opWriteBlock(DataInputStream in) throws IOException {
     final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
+    final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
     writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
+        PBHelper.convertStorageType(proto.getStorageType()),
         PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
         proto.getHeader().getClientName(),
-        PBHelper.convert(proto.getTargetsList()),
+        targets,
+        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
         PBHelper.convert(proto.getSource()),
         fromProto(proto.getStage()),
         proto.getPipelineSize(),
@@ -140,10 +144,12 @@ public abstract class Receiver implements DataTransferProtocol {
   private void opTransferBlock(DataInputStream in) throws IOException {
     final OpTransferBlockProto proto =
       OpTransferBlockProto.parseFrom(vintPrefixed(in));
+    final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
     transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
-        PBHelper.convert(proto.getTargetsList()));
+        targets,
+        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
   }
 
   /** Receive {@link Op#REQUEST_SHORT_CIRCUIT_FDS} */
@@ -176,6 +182,7 @@ public abstract class Receiver implements DataTransferProtocol {
   private void opReplaceBlock(DataInputStream in) throws IOException {
     OpReplaceBlockProto proto = OpReplaceBlockProto.parseFrom(vintPrefixed(in));
     replaceBlock(PBHelper.convert(proto.getHeader().getBlock()),
+        PBHelper.convertStorageType(proto.getStorageType()),
         PBHelper.convert(proto.getHeader().getToken()),
         proto.getDelHint(),
         PBHelper.convert(proto.getSource()));

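The three receiver paths above now decode a storage type for the block itself and one per target, with a fallback when an older client omits them. A minimal sketch of that fallback, assuming an OpWriteBlockProto proto sent without storage types (illustrative only, not part of the patch; it relies on the PBHelper helpers added further down in this diff):

    DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
    // An empty targetStorageTypes list is expanded to StorageType.DEFAULT for
    // every target, so the two arrays always end up the same length.
    StorageType[] targetStorageTypes = PBHelper.convertStorageTypes(
        proto.getTargetStorageTypesList(), targets.length);
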
+ 10 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java

@@ -25,6 +25,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
@@ -111,9 +112,11 @@ public class Sender implements DataTransferProtocol {
 
   @Override
   public void writeBlock(final ExtendedBlock blk,
+      final StorageType storageType, 
       final Token<BlockTokenIdentifier> blockToken,
       final String clientName,
       final DatanodeInfo[] targets,
+      final StorageType[] targetStorageTypes, 
       final DatanodeInfo source,
       final BlockConstructionStage stage,
       final int pipelineSize,
@@ -130,7 +133,9 @@ public class Sender implements DataTransferProtocol {
 
     OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
       .setHeader(header)
+      .setStorageType(PBHelper.convertStorageType(storageType))
       .addAllTargets(PBHelper.convert(targets, 1))
+      .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1))
       .setStage(toProto(stage))
       .setPipelineSize(pipelineSize)
       .setMinBytesRcvd(minBytesRcvd)
@@ -150,12 +155,14 @@ public class Sender implements DataTransferProtocol {
   public void transferBlock(final ExtendedBlock blk,
       final Token<BlockTokenIdentifier> blockToken,
       final String clientName,
-      final DatanodeInfo[] targets) throws IOException {
+      final DatanodeInfo[] targets,
+      final StorageType[] targetStorageTypes) throws IOException {
     
     OpTransferBlockProto proto = OpTransferBlockProto.newBuilder()
       .setHeader(DataTransferProtoUtil.buildClientHeader(
           blk, clientName, blockToken))
       .addAllTargets(PBHelper.convert(targets))
+      .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes))
       .build();
 
     send(out, Op.TRANSFER_BLOCK, proto);
@@ -196,11 +203,13 @@ public class Sender implements DataTransferProtocol {
   
   @Override
   public void replaceBlock(final ExtendedBlock blk,
+      final StorageType storageType, 
       final Token<BlockTokenIdentifier> blockToken,
       final String delHint,
       final DatanodeInfo source) throws IOException {
     OpReplaceBlockProto proto = OpReplaceBlockProto.newBuilder()
       .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
+      .setStorageType(PBHelper.convertStorageType(storageType))
       .setDelHint(delHint)
       .setSource(PBHelper.convertDatanodeInfo(source))
       .build();

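Because writeBlock, transferBlock and replaceBlock all gain storage-type parameters, every caller now has to supply one type per target. A small usage sketch, assuming a connected DataOutputStream out, an ExtendedBlock blk, its access token, a client name and a targets pipeline (hypothetical variables, not taken from this patch):

    StorageType[] targetStorageTypes = new StorageType[targets.length];
    // Callers that have no placement policy yet simply fall back to DEFAULT.
    java.util.Arrays.fill(targetStorageTypes, StorageType.DEFAULT);
    new Sender(out).transferBlock(blk, blockToken, clientName,
        targets, targetStorageTypes);
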
+ 1 - 28
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java

@@ -97,7 +97,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
     RPC.setProtocolEngine(conf, DatanodeProtocolPB.class,
         ProtobufRpcEngine.class);
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi));
+    rpcProxy = createNamenode(nameNodeAddr, conf, ugi);
   }
 
   private static DatanodeProtocolPB createNamenode(
@@ -109,33 +109,6 @@ public class DatanodeProtocolClientSideTranslatorPB implements
         org.apache.hadoop.ipc.Client.getPingInterval(conf), null).getProxy();
   }
 
-  /** Create a {@link NameNode} proxy */
-  static DatanodeProtocolPB createNamenodeWithRetry(
-      DatanodeProtocolPB rpcNamenode) {
-    RetryPolicy createPolicy = RetryPolicies
-        .retryUpToMaximumCountWithFixedSleep(5,
-            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
-
-    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap = 
-        new HashMap<Class<? extends Exception>, RetryPolicy>();
-    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
-        createPolicy);
-
-    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
-        new HashMap<Class<? extends Exception>, RetryPolicy>();
-    exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
-        .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
-            remoteExceptionToPolicyMap));
-    RetryPolicy methodPolicy = RetryPolicies.retryByException(
-        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
-    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
-
-    methodNameToPolicyMap.put("create", methodPolicy);
-
-    return (DatanodeProtocolPB) RetryProxy.create(DatanodeProtocolPB.class,
-        rpcNamenode, methodNameToPolicyMap);
-  }
-
   @Override
   public void close() throws IOException {
     RPC.stopProxy(rpcProxy);

+ 7 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java

@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
+import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
 
@@ -61,7 +62,7 @@ import com.google.protobuf.ServiceException;
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
-    ProtocolMetaInterface, Closeable {
+    ProtocolMetaInterface, Closeable, ProtocolTranslator {
   /** RpcController is not used and hence is set to null */
   private final static RpcController NULL_CONTROLLER = null;
   
@@ -88,6 +89,11 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
     RPC.stopProxy(rpcProxy);
   }
 
+  @Override
+  public Object getUnderlyingProxyObject() {
+    return rpcProxy;
+  }
+
   @Override
   public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
       throws IOException {

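Implementing ProtocolTranslator lets generic IPC utilities reach the protobuf proxy wrapped by this translator, for example to resolve its connection id before stopping it. A hedged sketch of the unwrap pattern such a utility can use (variable names are illustrative):

    Object proxy = translator;                       // a NamenodeProtocolTranslatorPB
    if (proxy instanceof ProtocolTranslator) {
      proxy = ((ProtocolTranslator) proxy).getUnderlyingProxyObject();
    }
    // proxy now refers to the raw NamenodeProtocolPB proxy created by RPC.getProxy(...)
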
+ 49 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -150,6 +150,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryLi
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
@@ -674,14 +675,8 @@ public class PBHelper {
       targets[i] = PBHelper.convert(locs.get(i));
     }
 
-    final int storageTypesCount = proto.getStorageTypesCount();
-    final StorageType[] storageTypes;
-    if (storageTypesCount == 0) {
-      storageTypes = null;
-    } else {
-      Preconditions.checkState(storageTypesCount == locs.size());
-      storageTypes = convertStorageTypeProtos(proto.getStorageTypesList());
-    }
+    final StorageType[] storageTypes = convertStorageTypes(
+        proto.getStorageTypesList(), locs.size());
 
     final int storageIDsCount = proto.getStorageIDsCount();
     final String[] storageIDs;
@@ -969,6 +964,20 @@ public class PBHelper {
       targets[i] = PBHelper.convert(targetList.get(i));
     }
 
+    StorageType[][] targetStorageTypes = new StorageType[targetList.size()][];
+    List<StorageTypesProto> targetStorageTypesList = blkCmd.getTargetStorageTypesList();
+    if (targetStorageTypesList.isEmpty()) { // missing storage types
+      for(int i = 0; i < targetStorageTypes.length; i++) {
+        targetStorageTypes[i] = new StorageType[targets[i].length];
+        Arrays.fill(targetStorageTypes[i], StorageType.DEFAULT);
+      }
+    } else {
+      for(int i = 0; i < targetStorageTypes.length; i++) {
+        List<StorageTypeProto> p = targetStorageTypesList.get(i).getStorageTypesList();
+        targetStorageTypes[i] = convertStorageTypes(p, targets[i].length);
+      }
+    }
+
     List<StorageUuidsProto> targetStorageUuidsList = blkCmd.getTargetStorageUuidsList();
     String[][] targetStorageIDs = new String[targetStorageUuidsList.size()][];
     for(int i = 0; i < targetStorageIDs.length; i++) {
@@ -991,7 +1000,7 @@ public class PBHelper {
       throw new AssertionError("Unknown action type: " + blkCmd.getAction());
     }
     return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets,
-        targetStorageIDs);
+        targetStorageTypes, targetStorageIDs);
   }
 
   public static BlockIdCommand convert(BlockIdCommandProto blkIdCmd) {
@@ -1605,8 +1614,25 @@ public class PBHelper {
     }
   }
 
-  private static StorageTypeProto convertStorageType(
-      StorageType type) {
+  public static List<StorageTypeProto> convertStorageTypes(
+      StorageType[] types) {
+    return convertStorageTypes(types, 0);
+  }
+
+  public static List<StorageTypeProto> convertStorageTypes(
+      StorageType[] types, int startIdx) {
+    if (types == null) {
+      return null;
+    }
+    final List<StorageTypeProto> protos = new ArrayList<StorageTypeProto>(
+        types.length);
+    for (int i = startIdx; i < types.length; ++i) {
+      protos.add(convertStorageType(types[i]));
+    }
+    return protos; 
+  }
+
+  public static StorageTypeProto convertStorageType(StorageType type) {
     switch(type) {
     case DISK:
       return StorageTypeProto.DISK;
@@ -1621,7 +1647,7 @@ public class PBHelper {
   public static DatanodeStorage convert(DatanodeStorageProto s) {
     return new DatanodeStorage(s.getStorageUuid(),
                                PBHelper.convertState(s.getState()),
-                               PBHelper.convertType(s.getStorageType()));
+                               PBHelper.convertStorageType(s.getStorageType()));
   }
 
   private static State convertState(StorageState state) {
@@ -1634,7 +1660,7 @@ public class PBHelper {
     }
   }
 
-  private static StorageType convertType(StorageTypeProto type) {
+  public static StorageType convertStorageType(StorageTypeProto type) {
     switch(type) {
       case DISK:
         return StorageType.DISK;
@@ -1646,11 +1672,16 @@ public class PBHelper {
     }
   }
 
-  private static StorageType[] convertStorageTypeProtos(
-      List<StorageTypeProto> storageTypesList) {
-    final StorageType[] storageTypes = new StorageType[storageTypesList.size()];
-    for (int i = 0; i < storageTypes.length; ++i) {
-      storageTypes[i] = PBHelper.convertType(storageTypesList.get(i));
+  public static StorageType[] convertStorageTypes(
+      List<StorageTypeProto> storageTypesList, int expectedSize) {
+    final StorageType[] storageTypes = new StorageType[expectedSize];
+    if (storageTypesList.size() != expectedSize) { // missing storage types
+      Preconditions.checkState(storageTypesList.isEmpty());
+      Arrays.fill(storageTypes, StorageType.DEFAULT);
+    } else {
+      for (int i = 0; i < storageTypes.length; ++i) {
+        storageTypes[i] = convertStorageType(storageTypesList.get(i));
+      }
     }
     return storageTypes;
   }

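Taken together, the reworked helpers make the storage-type fields optional on the wire: an absent list decodes to StorageType.DEFAULT, a present list must match the expected length, and the enum round-trips through StorageTypeProto. A standalone sketch of that behaviour (illustrative, not code from the patch):

    List<StorageTypeProto> onWire = PBHelper.convertStorageTypes(
        new StorageType[] { StorageType.DISK, StorageType.DISK });     // two protos
    StorageType[] decoded = PBHelper.convertStorageTypes(onWire, 2);   // DISK, DISK
    StorageType[] legacy = PBHelper.convertStorageTypes(
        Collections.<StorageTypeProto>emptyList(), 2);                 // DEFAULT, DEFAULT

Passing a list whose size is neither zero nor the expected size trips the Preconditions check in convertStorageTypes.
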
+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -59,6 +59,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -368,7 +369,7 @@ public class Balancer {
         in = new DataInputStream(new BufferedInputStream(unbufIn,
             HdfsConstants.IO_FILE_BUFFER_SIZE));
         
-        sendRequest(out, eb, accessToken);
+        sendRequest(out, eb, StorageType.DEFAULT, accessToken);
         receiveResponse(in);
         bytesMoved.addAndGet(block.getNumBytes());
         LOG.info("Successfully moved " + this);
@@ -400,8 +401,9 @@ public class Balancer {
     
     /* Send a block replace request to the output stream*/
     private void sendRequest(DataOutputStream out, ExtendedBlock eb,
+        StorageType storageType, 
         Token<BlockTokenIdentifier> accessToken) throws IOException {
-      new Sender(out).replaceBlock(eb, accessToken,
+      new Sender(out).replaceBlock(eb, storageType, accessToken,
           source.getStorageID(), proxySource.getDatanode());
     }
     

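The balancer is not storage-type aware yet, so every replacement it negotiates simply pins the type to StorageType.DEFAULT; the call it ends up issuing is sketched below (variable names follow the surrounding code):

      new Sender(out).replaceBlock(eb, StorageType.DEFAULT, accessToken,
          source.getStorageID(), proxySource.getDatanode());
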
+ 21 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -725,7 +725,6 @@ public class BlockManager {
     final List<DatanodeStorageInfo> locations
         = new ArrayList<DatanodeStorageInfo>(blocksMap.numNodes(block));
     for(DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
-      final String storageID = storage.getStorageID();
       // filter invalidate replicas
       if(!invalidateBlocks.contains(storage.getDatanodeDescriptor(), block)) {
         locations.add(storage);
@@ -2637,7 +2636,7 @@ public class BlockManager {
     if (addedNode == delNodeHint) {
       delNodeHint = null;
     }
-    Collection<DatanodeDescriptor> nonExcess = new ArrayList<DatanodeDescriptor>();
+    Collection<DatanodeStorageInfo> nonExcess = new ArrayList<DatanodeStorageInfo>();
     Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
         .getNodes(block);
     for(DatanodeStorageInfo storage : blocksMap.getStorages(block, State.NORMAL)) {
@@ -2657,7 +2656,7 @@ public class BlockManager {
         if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
           // exclude corrupt replicas
           if (corruptNodes == null || !corruptNodes.contains(cur)) {
-            nonExcess.add(cur);
+            nonExcess.add(storage);
           }
         }
       }
@@ -2681,7 +2680,7 @@ public class BlockManager {
    * If no such a node is available,
    * then pick a node with least free space
    */
-  private void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess, 
+  private void chooseExcessReplicates(final Collection<DatanodeStorageInfo> nonExcess, 
                               Block b, short replication,
                               DatanodeDescriptor addedNode,
                               DatanodeDescriptor delNodeHint,
@@ -2689,28 +2688,33 @@ public class BlockManager {
     assert namesystem.hasWriteLock();
     // first form a rack to datanodes map and
     BlockCollection bc = getBlockCollection(b);
-    final Map<String, List<DatanodeDescriptor>> rackMap
-        = new HashMap<String, List<DatanodeDescriptor>>();
-    final List<DatanodeDescriptor> moreThanOne = new ArrayList<DatanodeDescriptor>();
-    final List<DatanodeDescriptor> exactlyOne = new ArrayList<DatanodeDescriptor>();
+
+    final Map<String, List<DatanodeStorageInfo>> rackMap
+        = new HashMap<String, List<DatanodeStorageInfo>>();
+    final List<DatanodeStorageInfo> moreThanOne = new ArrayList<DatanodeStorageInfo>();
+    final List<DatanodeStorageInfo> exactlyOne = new ArrayList<DatanodeStorageInfo>();
     
     // split nodes into two sets
     // moreThanOne contains nodes on rack with more than one replica
     // exactlyOne contains the remaining nodes
-    replicator.splitNodesWithRack(nonExcess, rackMap, moreThanOne,
-        exactlyOne);
+    replicator.splitNodesWithRack(nonExcess, rackMap, moreThanOne, exactlyOne);
     
     // pick one node to delete that favors the delete hint
     // otherwise pick one with least space from priSet if it is not empty
     // otherwise one node with least space from remains
     boolean firstOne = true;
+    final DatanodeStorageInfo delNodeHintStorage
+        = DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess, delNodeHint);
+    final DatanodeStorageInfo addedNodeStorage
+        = DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess, addedNode);
     while (nonExcess.size() - replication > 0) {
       // check if we can delete delNodeHint
-      final DatanodeInfo cur;
-      if (firstOne && delNodeHint !=null && nonExcess.contains(delNodeHint)
-          && (moreThanOne.contains(delNodeHint)
-              || (addedNode != null && !moreThanOne.contains(addedNode))) ) {
-        cur = delNodeHint;
+      final DatanodeStorageInfo cur;
+      if (firstOne && delNodeHintStorage != null
+          && (moreThanOne.contains(delNodeHintStorage)
+              || (addedNodeStorage != null
+                  && !moreThanOne.contains(addedNodeStorage)))) {
+        cur = delNodeHintStorage;
       } else { // regular excessive replica removal
         cur = replicator.chooseReplicaToDelete(bc, b, replication,
         		moreThanOne, exactlyOne);
@@ -2722,7 +2726,7 @@ public class BlockManager {
           exactlyOne, cur);
 
       nonExcess.remove(cur);
-      addToExcessReplicate(cur, b);
+      addToExcessReplicate(cur.getDatanodeDescriptor(), b);
 
       //
       // The 'excessblocks' tracks blocks until we get confirmation
@@ -2733,7 +2737,7 @@ public class BlockManager {
       // should be deleted.  Items are removed from the invalidate list
       // upon giving instructions to the namenode.
       //
-      addToInvalidates(b, cur);
+      addToInvalidates(b, cur.getDatanodeDescriptor());
       blockLog.info("BLOCK* chooseExcessReplicates: "
                 +"("+cur+", "+b+") is added to invalidated blocks set");
     }

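chooseExcessReplicates now reasons about individual storages, so the datanode-level hints (delNodeHint, addedNode) are first mapped onto a storage from the candidate set. The helper used for that lookup lives in DatanodeStorageInfo.java, which is not shown in this excerpt; a simplified sketch of what it is expected to do:

  static DatanodeStorageInfo getDatanodeStorageInfo(
      final Iterable<DatanodeStorageInfo> infos,
      final DatanodeDescriptor node) {
    if (node == null) {
      return null;
    }
    for (DatanodeStorageInfo storage : infos) {
      if (storage.getDatanodeDescriptor() == node) {
        return storage;      // first storage reported by the hinted datanode
      }
    }
    return null;
  }
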
+ 34 - 31
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java

@@ -124,11 +124,12 @@ public abstract class BlockPlacementPolicy {
                    listed in the previous parameter.
    * @return the replica that is the best candidate for deletion
    */
-  abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcBC,
-                                      Block block, 
-                                      short replicationFactor,
-                                      Collection<DatanodeDescriptor> existingReplicas,
-                                      Collection<DatanodeDescriptor> moreExistingReplicas);
+  abstract public DatanodeStorageInfo chooseReplicaToDelete(
+      BlockCollection srcBC,
+      Block block, 
+      short replicationFactor,
+      Collection<DatanodeStorageInfo> existingReplicas,
+      Collection<DatanodeStorageInfo> moreExistingReplicas);
 
   /**
    * Used to setup a BlockPlacementPolicy object. This should be defined by 
@@ -175,21 +176,23 @@ public abstract class BlockPlacementPolicy {
    * @param exactlyOne The List of replica nodes on rack with only one replica
    * @param cur current replica to remove
    */
-  public void adjustSetsWithChosenReplica(final Map<String, 
-      List<DatanodeDescriptor>> rackMap,
-      final List<DatanodeDescriptor> moreThanOne,
-      final List<DatanodeDescriptor> exactlyOne, final DatanodeInfo cur) {
+  public void adjustSetsWithChosenReplica(
+      final Map<String, List<DatanodeStorageInfo>> rackMap,
+      final List<DatanodeStorageInfo> moreThanOne,
+      final List<DatanodeStorageInfo> exactlyOne,
+      final DatanodeStorageInfo cur) {
     
-    String rack = getRack(cur);
-    final List<DatanodeDescriptor> datanodes = rackMap.get(rack);
-    datanodes.remove(cur);
-    if (datanodes.isEmpty()) {
+    final String rack = getRack(cur.getDatanodeDescriptor());
+    final List<DatanodeStorageInfo> storages = rackMap.get(rack);
+    storages.remove(cur);
+    if (storages.isEmpty()) {
       rackMap.remove(rack);
     }
     if (moreThanOne.remove(cur)) {
-      if (datanodes.size() == 1) {
-        moreThanOne.remove(datanodes.get(0));
-        exactlyOne.add(datanodes.get(0));
+      if (storages.size() == 1) {
+        final DatanodeStorageInfo remaining = storages.get(0);
+        moreThanOne.remove(remaining);
+        exactlyOne.add(remaining);
       }
     } else {
       exactlyOne.remove(cur);
@@ -214,28 +217,28 @@ public abstract class BlockPlacementPolicy {
    * @param exactlyOne remains contains the remaining nodes
    */
   public void splitNodesWithRack(
-      Collection<DatanodeDescriptor> dataNodes,
-      final Map<String, List<DatanodeDescriptor>> rackMap,
-      final List<DatanodeDescriptor> moreThanOne,
-      final List<DatanodeDescriptor> exactlyOne) {
-    for(DatanodeDescriptor node : dataNodes) {
-      final String rackName = getRack(node);
-      List<DatanodeDescriptor> datanodeList = rackMap.get(rackName);
-      if (datanodeList == null) {
-        datanodeList = new ArrayList<DatanodeDescriptor>();
-        rackMap.put(rackName, datanodeList);
+      final Iterable<DatanodeStorageInfo> storages,
+      final Map<String, List<DatanodeStorageInfo>> rackMap,
+      final List<DatanodeStorageInfo> moreThanOne,
+      final List<DatanodeStorageInfo> exactlyOne) {
+    for(DatanodeStorageInfo s: storages) {
+      final String rackName = getRack(s.getDatanodeDescriptor());
+      List<DatanodeStorageInfo> storageList = rackMap.get(rackName);
+      if (storageList == null) {
+        storageList = new ArrayList<DatanodeStorageInfo>();
+        rackMap.put(rackName, storageList);
       }
-      datanodeList.add(node);
+      storageList.add(s);
     }
     
     // split nodes into two sets
-    for(List<DatanodeDescriptor> datanodeList : rackMap.values()) {
-      if (datanodeList.size() == 1) {
+    for(List<DatanodeStorageInfo> storageList : rackMap.values()) {
+      if (storageList.size() == 1) {
         // exactlyOne contains nodes on rack with only one replica
-        exactlyOne.add(datanodeList.get(0));
+        exactlyOne.add(storageList.get(0));
       } else {
         // moreThanOne contains nodes on rack with more than one replica
-        moreThanOne.addAll(datanodeList);
+        moreThanOne.addAll(storageList);
       }
     }
   }

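The rack bookkeeping keeps its old shape and simply flows DatanodeStorageInfo through. A small usage sketch of the two-set split (variable names are illustrative):

    final Map<String, List<DatanodeStorageInfo>> rackMap
        = new HashMap<String, List<DatanodeStorageInfo>>();
    final List<DatanodeStorageInfo> moreThanOne = new ArrayList<DatanodeStorageInfo>();
    final List<DatanodeStorageInfo> exactlyOne = new ArrayList<DatanodeStorageInfo>();
    policy.splitNodesWithRack(candidateStorages, rackMap, moreThanOne, exactlyOne);
    // moreThanOne: storages on racks that hold two or more of the candidates
    // exactlyOne : storages that are the only candidate on their rack
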
+ 32 - 27
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -145,14 +145,14 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       List<DatanodeStorageInfo> results = new ArrayList<DatanodeStorageInfo>();
       boolean avoidStaleNodes = stats != null
           && stats.isAvoidingStaleDataNodesForWrite();
-      for (int i = 0; i < Math.min(favoredNodes.size(), numOfReplicas); i++) {
+      for (int i = 0; i < favoredNodes.size() && results.size() < numOfReplicas; i++) {
         DatanodeDescriptor favoredNode = favoredNodes.get(i);
         // Choose a single node which is local to favoredNode.
         // 'results' is updated within chooseLocalNode
         final DatanodeStorageInfo target = chooseLocalStorage(favoredNode,
             favoriteAndExcludedNodes, blocksize, 
             getMaxNodesPerRack(results.size(), numOfReplicas)[1],
-            results, avoidStaleNodes, storageType);
+            results, avoidStaleNodes, storageType, false);
         if (target == null) {
           LOG.warn("Could not find a target for file " + src
               + " with favored node " + favoredNode); 
@@ -271,7 +271,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     try {
       if (numOfResults == 0) {
         writer = chooseLocalStorage(writer, excludedNodes, blocksize,
-            maxNodesPerRack, results, avoidStaleNodes, storageType)
+            maxNodesPerRack, results, avoidStaleNodes, storageType, true)
                 .getDatanodeDescriptor();
         if (--numOfReplicas == 0) {
           return writer;
@@ -345,12 +345,14 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                                              int maxNodesPerRack,
                                              List<DatanodeStorageInfo> results,
                                              boolean avoidStaleNodes,
-                                             StorageType storageType)
+                                             StorageType storageType,
+                                             boolean fallbackToLocalRack)
       throws NotEnoughReplicasException {
     // if no local machine, randomly choose one node
-    if (localMachine == null)
+    if (localMachine == null) {
       return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
           maxNodesPerRack, results, avoidStaleNodes, storageType);
+    }
     if (preferLocalNode && localMachine instanceof DatanodeDescriptor) {
       DatanodeDescriptor localDatanode = (DatanodeDescriptor) localMachine;
       // otherwise try local machine first
@@ -363,7 +365,11 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
           }
         }
       } 
-    }      
+    }
+
+    if (!fallbackToLocalRack) {
+      return null;
+    }
     // try a node on local rack
     return chooseLocalRack(localMachine, excludedNodes, blocksize,
         maxNodesPerRack, results, avoidStaleNodes, storageType);
@@ -636,15 +642,11 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 
     // check the communication traffic of the target machine
     if (considerLoad) {
-      double avgLoad = 0;
-      if (stats != null) {
-        int size = stats.getNumDatanodesInService();
-        if (size != 0) {
-          avgLoad = (double)stats.getTotalLoad()/size;
-        }
-      }
-      if (node.getXceiverCount() > (2.0 * avgLoad)) {
-        logNodeIsNotChosen(storage, "the node is too busy ");
+      final double maxLoad = 2.0 * stats.getInServiceXceiverAverage();
+      final int nodeLoad = node.getXceiverCount();
+      if (nodeLoad > maxLoad) {
+        logNodeIsNotChosen(storage,
+            "the node is too busy (load:"+nodeLoad+" > "+maxLoad+") ");
         return false;
       }
     }
@@ -727,31 +729,34 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   }
 
   @Override
-  public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc,
+  public DatanodeStorageInfo chooseReplicaToDelete(BlockCollection bc,
       Block block, short replicationFactor,
-      Collection<DatanodeDescriptor> first,
-      Collection<DatanodeDescriptor> second) {
+      Collection<DatanodeStorageInfo> first,
+      Collection<DatanodeStorageInfo> second) {
     long oldestHeartbeat =
       now() - heartbeatInterval * tolerateHeartbeatMultiplier;
-    DatanodeDescriptor oldestHeartbeatNode = null;
+    DatanodeStorageInfo oldestHeartbeatStorage = null;
     long minSpace = Long.MAX_VALUE;
-    DatanodeDescriptor minSpaceNode = null;
+    DatanodeStorageInfo minSpaceStorage = null;
 
     // Pick the node with the oldest heartbeat or with the least free space,
     // if all hearbeats are within the tolerable heartbeat interval
-    for(DatanodeDescriptor node : pickupReplicaSet(first, second)) {
+    for(DatanodeStorageInfo storage : pickupReplicaSet(first, second)) {
+      final DatanodeDescriptor node = storage.getDatanodeDescriptor();
       long free = node.getRemaining();
       long lastHeartbeat = node.getLastUpdate();
       if(lastHeartbeat < oldestHeartbeat) {
         oldestHeartbeat = lastHeartbeat;
-        oldestHeartbeatNode = node;
+        oldestHeartbeatStorage = storage;
       }
       if (minSpace > free) {
         minSpace = free;
-        minSpaceNode = node;
+        minSpaceStorage = storage;
       }
     }
-    return oldestHeartbeatNode != null ? oldestHeartbeatNode : minSpaceNode;
+
+    return oldestHeartbeatStorage != null? oldestHeartbeatStorage
+        : minSpaceStorage;
   }
 
   /**
@@ -760,9 +765,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    * replica while second set contains remaining replica nodes.
    * So pick up first set if not empty. If first is empty, then pick second.
    */
-  protected Collection<DatanodeDescriptor> pickupReplicaSet(
-      Collection<DatanodeDescriptor> first,
-      Collection<DatanodeDescriptor> second) {
+  protected Collection<DatanodeStorageInfo> pickupReplicaSet(
+      Collection<DatanodeStorageInfo> first,
+      Collection<DatanodeStorageInfo> second) {
     return first.isEmpty() ? second : first;
   }
 

Some files were not shown because too many files changed in this diff