
Merge changes from trunk

Jing Zhao, 11 years ago
parent
commit
432b863719
77 changed files with 2,862 additions and 604 deletions
  1. 13 0
      hadoop-common-project/hadoop-auth/pom.xml
  2. 111 41
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
  3. 3 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
  4. 5 2
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
  5. 5 4
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerSecretProvider.java
  6. 9 6
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
  7. 506 0
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
  8. 142 6
      hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
  9. 5 0
      hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm
  10. 91 26
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
  11. 55 0
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestJaasConfiguration.java
  12. 1 1
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
  13. 1 1
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRolloverSignerSecretProvider.java
  14. 17 6
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java
  15. 7 2
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestStringSignerSecretProvider.java
  16. 270 0
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
  17. 16 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  18. 1 0
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
  19. 90 37
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
  20. 0 81
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
  21. 9 0
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
  22. 94 0
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-user-functions.sh.example
  23. 4 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
  24. 30 2
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.c
  25. 53 0
      hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm
  26. 24 1
      hadoop-common-project/hadoop-kms/pom.xml
  27. 39 8
      hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
  28. 135 0
      hadoop-common-project/hadoop-kms/src/test/resources/mini-kms-acls-default.xml
  29. 7 1
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
  30. 20 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  31. 13 0
      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  32. 5 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
  33. 2 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  34. 36 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  35. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  36. 14 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
  37. 11 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
  38. 0 81
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java
  39. 40 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
  40. 89 34
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
  41. 20 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
  42. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  43. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  44. 6 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  45. 8 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
  46. 12 12
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
  47. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  48. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  49. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  50. 8 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
  51. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto
  52. 2 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
  53. 58 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
  54. 56 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
  55. 55 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
  56. 4 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
  57. 0 20
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
  58. 38 14
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
  59. 97 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracingShortCircuitLocalRead.java
  60. 5 2
      hadoop-mapreduce-project/bin/mapred-config.sh
  61. 25 0
      hadoop-project/pom.xml
  62. 12 24
      hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
  63. 21 25
      hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
  64. 2 4
      hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOutputStream.java
  65. 15 0
      hadoop-yarn-project/CHANGES.txt
  66. 7 4
      hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh
  67. 35 17
      hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
  68. 14 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
  69. 17 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
  70. 58 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSSleepingAppMaster.java
  71. 76 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
  72. 10 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
  73. 14 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
  74. 14 23
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/authorize/TimelinePolicyProvider.java
  75. 9 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
  76. 26 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
  77. 139 26
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java

+ 13 - 0
hadoop-common-project/hadoop-auth/pom.xml

@@ -130,6 +130,19 @@
          </exclusion>
        </exclusions>
    </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-framework</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-test</artifactId>
+      <scope>test</scope>
+    </dependency>
  </dependencies>

  <build>

+ 111 - 41
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java

@@ -22,6 +22,7 @@ import org.apache.hadoop.security.authentication.util.SignerException;
 import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
 import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;
+import org.apache.hadoop.security.authentication.util.ZKSignerSecretProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -42,7 +43,7 @@ import java.util.*;

 /**
  * The {@link AuthenticationFilter} enables protecting web application resources with different (pluggable)
- * authentication mechanisms.
+ * authentication mechanisms and signer secret providers.
  * <p/>
  * Out of the box it provides 2 authentication mechanisms: Pseudo and Kerberos SPNEGO.
  * <p/>
@@ -60,10 +61,13 @@ import java.util.*;
  * <li>[#PREFIX#.]type: simple|kerberos|#CLASS#, 'simple' is short for the
  * {@link PseudoAuthenticationHandler}, 'kerberos' is short for {@link KerberosAuthenticationHandler}, otherwise
  * the full class name of the {@link AuthenticationHandler} must be specified.</li>
- * <li>[#PREFIX#.]signature.secret: the secret used to sign the HTTP cookie value. The default value is a random
- * value. Unless multiple webapp instances need to share the secret the random value is adequate.</li>
- * <li>[#PREFIX#.]token.validity: time -in seconds- that the generated token is valid before a
- * new authentication is triggered, default value is <code>3600</code> seconds.</li>
+ * <li>[#PREFIX#.]signature.secret: when signer.secret.provider is set to
+ * "string" or not specified, this is the value for the secret used to sign the
+ * HTTP cookie.</li>
+ * <li>[#PREFIX#.]token.validity: time -in seconds- that the generated token is
+ * valid before a new authentication is triggered, default value is
+ * <code>3600</code> seconds. This is also used for the rollover interval for
+ * the "random" and "zookeeper" SignerSecretProviders.</li>
  * <li>[#PREFIX#.]cookie.domain: domain to use for the HTTP cookie that stores the authentication token.</li>
  * <li>[#PREFIX#.]cookie.path: path to use for the HTTP cookie that stores the authentication token.</li>
  * </ul>
@@ -72,6 +76,49 @@ import java.util.*;
  * {@link AuthenticationFilter} will take all the properties that start with the prefix #PREFIX#, it will remove
  * the prefix from it and it will pass them to the the authentication handler for initialization. Properties that do
  * not start with the prefix will not be passed to the authentication handler initialization.
+ * <p/>
+ * Out of the box it provides 3 signer secret provider implementations:
+ * "string", "random", and "zookeeper"
+ * <p/>
+ * Additional signer secret providers are supported via the
+ * {@link SignerSecretProvider} class.
+ * <p/>
+ * For the HTTP cookies mentioned above, the SignerSecretProvider is used to
+ * determine the secret to use for signing the cookies. Different
+ * implementations can have different behaviors.  The "string" implementation
+ * simply uses the string set in the [#PREFIX#.]signature.secret property
+ * mentioned above.  The "random" implementation uses a randomly generated
+ * secret that rolls over at the interval specified by the
+ * [#PREFIX#.]token.validity mentioned above.  The "zookeeper" implementation
+ * is like the "random" one, except that it synchronizes the random secret
+ * and rollovers between multiple servers; it's meant for HA services.
+ * <p/>
+ * The relevant configuration properties are:
+ * <ul>
+ * <li>signer.secret.provider: indicates the name of the SignerSecretProvider
+ * class to use. Possible values are: "string", "random", "zookeeper", or a
+ * classname. If not specified, the "string" implementation will be used with
+ * [#PREFIX#.]signature.secret; and if that's not specified, the "random"
+ * implementation will be used.</li>
+ * <li>[#PREFIX#.]signature.secret: When the "string" implementation is
+ * specified, this value is used as the secret.</li>
+ * <li>[#PREFIX#.]token.validity: When the "random" or "zookeeper"
+ * implementations are specified, this value is used as the rollover
+ * interval.</li>
+ * </ul>
+ * <p/>
+ * The "zookeeper" implementation has additional configuration properties that
+ * must be specified; see {@link ZKSignerSecretProvider} for details.
+ * <p/>
+ * For subclasses of AuthenticationFilter that want additional control over the
+ * SignerSecretProvider, they can use the following attribute set in the
+ * ServletContext:
+ * <ul>
+ * <li>signer.secret.provider.object: A SignerSecretProvider implementation can
+ * be passed here that will be used instead of the signer.secret.provider
+ * configuration property. Note that the class should already be
+ * initialized.</li>
+ * </ul>
  */

 @InterfaceAudience.Private
@@ -112,20 +159,23 @@ public class AuthenticationFilter implements Filter {

   /**
    * Constant for the configuration property that indicates the name of the
-   * SignerSecretProvider class to use.  If not specified, SIGNATURE_SECRET
-   * will be used or a random secret.
+   * SignerSecretProvider class to use.
+   * Possible values are: "string", "random", "zookeeper", or a classname.
+   * If not specified, the "string" implementation will be used with
+   * SIGNATURE_SECRET; and if that's not specified, the "random" implementation
+   * will be used.
    */
-  public static final String SIGNER_SECRET_PROVIDER_CLASS =
+  public static final String SIGNER_SECRET_PROVIDER =
           "signer.secret.provider";
           "signer.secret.provider";
 
 
   /**
   /**
-   * Constant for the attribute that can be used for providing a custom
-   * object that subclasses the SignerSecretProvider.  Note that this should be
-   * set in the ServletContext and the class should already be initialized.  
-   * If not specified, SIGNER_SECRET_PROVIDER_CLASS will be used.
+   * Constant for the ServletContext attribute that can be used for providing a
+   * custom implementation of the SignerSecretProvider. Note that the class
+   * should already be initialized. If not specified, SIGNER_SECRET_PROVIDER
+   * will be used.
    */
-  public static final String SIGNATURE_PROVIDER_ATTRIBUTE =
-      "org.apache.hadoop.security.authentication.util.SignerSecretProvider";
+  public static final String SIGNER_SECRET_PROVIDER_ATTRIBUTE =
+      "signer.secret.provider.object";
 
 
   private Properties config;
   private Properties config;
   private Signer signer;
   private Signer signer;
@@ -138,7 +188,7 @@ public class AuthenticationFilter implements Filter {
   private String cookiePath;

   /**
-   * Initializes the authentication filter.
+   * Initializes the authentication filter and signer secret provider.
    * <p/>
    * It instantiates and initializes the specified {@link AuthenticationHandler}.
    * <p/>
@@ -184,35 +234,19 @@ public class AuthenticationFilter implements Filter {
     validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY, "36000"))
         * 1000; //10 hours
     secretProvider = (SignerSecretProvider) filterConfig.getServletContext().
-        getAttribute(SIGNATURE_PROVIDER_ATTRIBUTE);
+        getAttribute(SIGNER_SECRET_PROVIDER_ATTRIBUTE);
     if (secretProvider == null) {
-      String signerSecretProviderClassName =
-          config.getProperty(configPrefix + SIGNER_SECRET_PROVIDER_CLASS, null);
-      if (signerSecretProviderClassName == null) {
-        String signatureSecret =
-            config.getProperty(configPrefix + SIGNATURE_SECRET, null);
-        if (signatureSecret != null) {
-          secretProvider = new StringSignerSecretProvider(signatureSecret);
-        } else {
-          secretProvider = new RandomSignerSecretProvider();
-          randomSecret = true;
-        }
-      } else {
-        try {
-          Class<?> klass = Thread.currentThread().getContextClassLoader().
-              loadClass(signerSecretProviderClassName);
-          secretProvider = (SignerSecretProvider) klass.newInstance();
-          customSecretProvider = true;
-        } catch (ClassNotFoundException ex) {
-          throw new ServletException(ex);
-        } catch (InstantiationException ex) {
-          throw new ServletException(ex);
-        } catch (IllegalAccessException ex) {
-          throw new ServletException(ex);
-        }
+      Class<? extends SignerSecretProvider> providerClass
+              = getProviderClass(config);
+      try {
+        secretProvider = providerClass.newInstance();
+      } catch (InstantiationException ex) {
+        throw new ServletException(ex);
+      } catch (IllegalAccessException ex) {
+        throw new ServletException(ex);
       }
       try {
-        secretProvider.init(config, validity);
+        secretProvider.init(config, filterConfig.getServletContext(), validity);
       } catch (Exception ex) {
         throw new ServletException(ex);
       }
@@ -225,6 +259,42 @@ public class AuthenticationFilter implements Filter {
     cookiePath = config.getProperty(COOKIE_PATH, null);
   }

+  @SuppressWarnings("unchecked")
+  private Class<? extends SignerSecretProvider> getProviderClass(Properties config)
+          throws ServletException {
+    String providerClassName;
+    String signerSecretProviderName
+            = config.getProperty(SIGNER_SECRET_PROVIDER, null);
+    // fallback to old behavior
+    if (signerSecretProviderName == null) {
+      String signatureSecret = config.getProperty(SIGNATURE_SECRET, null);
+      if (signatureSecret != null) {
+        providerClassName = StringSignerSecretProvider.class.getName();
+      } else {
+        providerClassName = RandomSignerSecretProvider.class.getName();
+        randomSecret = true;
+      }
+    } else {
+      if ("random".equals(signerSecretProviderName)) {
+        providerClassName = RandomSignerSecretProvider.class.getName();
+        randomSecret = true;
+      } else if ("string".equals(signerSecretProviderName)) {
+        providerClassName = StringSignerSecretProvider.class.getName();
+      } else if ("zookeeper".equals(signerSecretProviderName)) {
+        providerClassName = ZKSignerSecretProvider.class.getName();
+      } else {
+        providerClassName = signerSecretProviderName;
+        customSecretProvider = true;
+      }
+    }
+    try {
+      return (Class<? extends SignerSecretProvider>) Thread.currentThread().
+              getContextClassLoader().loadClass(providerClassName);
+    } catch (ClassNotFoundException ex) {
+      throw new ServletException(ex);
+    }
+  }
+
   /**
    * Returns the configuration properties of the {@link AuthenticationFilter}
    * without the prefix. The returned properties are the same that the
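To illustrate the new signer.secret.provider.object hook described in the javadoc above, here is a minimal, hypothetical sketch of how a web application (or an AuthenticationFilter subclass) could hand a pre-initialized provider to the filter through the ServletContext. It is not part of this change; SecretProviderContextListener and createAndInitProvider are illustrative names, while the attribute constant and SignerSecretProvider come from the diff:

import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.util.SignerSecretProvider;

public class SecretProviderContextListener implements ServletContextListener {

  @Override
  public void contextInitialized(ServletContextEvent sce) {
    // The filter reads this attribute in init() and, when present, uses it
    // instead of the signer.secret.provider configuration property.
    SignerSecretProvider provider = createAndInitProvider();
    sce.getServletContext().setAttribute(
        AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE, provider);
  }

  @Override
  public void contextDestroyed(ServletContextEvent sce) {
    // The owner of the provider is responsible for calling destroy() on it.
  }

  private SignerSecretProvider createAndInitProvider() {
    // Placeholder: construct a provider and call init(...) on it here, since
    // the filter expects the object to be initialized already.
    throw new UnsupportedOperationException("illustrative placeholder");
  }
}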

+ 3 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java

@@ -13,12 +13,13 @@
  */
 package org.apache.hadoop.security.authentication.util;

+import com.google.common.annotations.VisibleForTesting;
 import java.util.Random;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

 /**
- * A SignerSecretProvider that uses a random number as it's secret.  It rolls
+ * A SignerSecretProvider that uses a random number as its secret.  It rolls
  * the secret at a regular interval.
  */
 @InterfaceStability.Unstable
@@ -37,6 +38,7 @@ public class RandomSignerSecretProvider extends RolloverSignerSecretProvider {
    * is meant for testing.
    * @param seed the seed for the random number generator
    */
+  @VisibleForTesting
   public RandomSignerSecretProvider(long seed) {
     super();
     rand = new Random(seed);

+ 5 - 2
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java

@@ -17,6 +17,7 @@ import java.util.Properties;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
+import javax.servlet.ServletContext;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.slf4j.Logger;
@@ -57,12 +58,14 @@ public abstract class RolloverSignerSecretProvider
    * Initialize the SignerSecretProvider.  It initializes the current secret
    * and starts the scheduler for the rollover to run at an interval of
    * tokenValidity.
-   * @param config filter configuration
+   * @param config configuration properties
+   * @param servletContext servlet context
    * @param tokenValidity The amount of time a token is valid for
    * @throws Exception
    */
   @Override
-  public void init(Properties config, long tokenValidity) throws Exception {
+  public void init(Properties config, ServletContext servletContext,
+          long tokenValidity) throws Exception {
     initSecrets(generateNewSecret(), null);
     startScheduler(tokenValidity, tokenValidity);
   }
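As the init(...) change above shows, a concrete rolling provider only has to supply the secret itself; RolloverSignerSecretProvider calls initSecrets(generateNewSecret(), null) and drives the rollover scheduler. A minimal sketch of a custom subclass (hypothetical, not part of this commit) that draws its secrets from SecureRandom rather than java.util.Random:

package org.apache.hadoop.security.authentication.util;

import java.security.SecureRandom;

/**
 * Illustrative only: a rolling provider that overrides generateNewSecret(),
 * leaving initialization and rollover scheduling to the base class shown in
 * the diff above.
 */
public class SecureRandomSignerSecretProvider
    extends RolloverSignerSecretProvider {

  private final SecureRandom rand = new SecureRandom();

  @Override
  protected byte[] generateNewSecret() {
    byte[] secret = new byte[32];  // 256-bit secret; the size is arbitrary here
    rand.nextBytes(secret);
    return secret;
  }
}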

+ 5 - 4
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerSecretProvider.java

@@ -14,6 +14,7 @@
 package org.apache.hadoop.security.authentication.util;

 import java.util.Properties;
+import javax.servlet.ServletContext;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

@@ -30,13 +31,13 @@ public abstract class SignerSecretProvider {

   /**
    * Initialize the SignerSecretProvider
-   * @param config filter configuration
+   * @param config configuration properties
+   * @param servletContext servlet context
    * @param tokenValidity The amount of time a token is valid for
    * @throws Exception
    */
-  public abstract void init(Properties config, long tokenValidity)
-      throws Exception;
-
+  public abstract void init(Properties config, ServletContext servletContext,
+          long tokenValidity) throws Exception;
   /**
    * Will be called on shutdown; subclasses should perform any cleanup here.
    */

+ 9 - 6
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java

@@ -14,8 +14,10 @@
 package org.apache.hadoop.security.authentication.util;

 import java.util.Properties;
+import javax.servlet.ServletContext;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;

 /**
  * A SignerSecretProvider that simply creates a secret based on a given String.
@@ -27,14 +29,15 @@ public class StringSignerSecretProvider extends SignerSecretProvider {
   private byte[] secret;
   private byte[][] secrets;

-  public StringSignerSecretProvider(String secretStr) {
-    secret = secretStr.getBytes();
-    secrets = new byte[][]{secret};
-  }
+  public StringSignerSecretProvider() {}

   @Override
-  public void init(Properties config, long tokenValidity) throws Exception {
-    // do nothing
+  public void init(Properties config, ServletContext servletContext,
+          long tokenValidity) throws Exception {
+    String signatureSecret = config.getProperty(
+            AuthenticationFilter.SIGNATURE_SECRET, null);
+    secret = signatureSecret.getBytes();
+    secrets = new byte[][]{secret};
   }

   @Override

+ 506 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java

@@ -0,0 +1,506 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import com.google.common.annotations.VisibleForTesting;
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Random;
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.Configuration;
+import javax.servlet.ServletContext;
+import org.apache.curator.RetryPolicy;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.framework.api.ACLProvider;
+import org.apache.curator.framework.imps.DefaultACLProvider;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooDefs.Perms;
+import org.apache.zookeeper.client.ZooKeeperSaslClient;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Id;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A SignerSecretProvider that synchronizes a rolling random secret between
+ * multiple servers using ZooKeeper.
+ * <p/>
+ * It works by storing the secrets and next rollover time in a ZooKeeper znode.
+ * All ZKSignerSecretProviders looking at that znode will use those
+ * secrets and next rollover time to ensure they are synchronized.  There is no
+ * "leader" -- any of the ZKSignerSecretProviders can choose the next secret;
+ * which one is indeterminate.  Kerberos-based ACLs can also be enforced to
+ * prevent a malicious third-party from getting or setting the secrets.  It uses
+ * its own CuratorFramework client for talking to ZooKeeper.  If you want to use
+ * your own Curator client, you can pass it to ZKSignerSecretProvider; see
+ * {@link org.apache.hadoop.security.authentication.server.AuthenticationFilter}
+ * for more details.
+ * <p/>
+ * The supported configuration properties are:
+ * <ul>
+ * <li>signer.secret.provider.zookeeper.connection.string: indicates the
+ * ZooKeeper connection string to connect with.</li>
+ * <li>signer.secret.provider.zookeeper.path: indicates the ZooKeeper path
+ * to use for storing and retrieving the secrets.  All ZKSignerSecretProviders
+ * that need to coordinate should point to the same path.</li>
+ * <li>signer.secret.provider.zookeeper.auth.type: indicates the auth type to
+ * use.  Supported values are "none" and "sasl".  The default value is "none"
+ * </li>
+ * <li>signer.secret.provider.zookeeper.kerberos.keytab: set this to the path
+ * with the Kerberos keytab file.  This is only required if using Kerberos.</li>
+ * <li>signer.secret.provider.zookeeper.kerberos.principal: set this to the
+ * Kerberos principal to use.  This only required if using Kerberos.</li>
+ * <li>signer.secret.provider.zookeeper.disconnect.on.close: when set to "true",
+ * ZKSignerSecretProvider will close the ZooKeeper connection on shutdown.  The
+ * default is "true". Only set this to "false" if a custom Curator client is
+ * being provided and the disconnection is being handled elsewhere.</li>
+ * </ul>
+ *
+ * The following attribute in the ServletContext can also be set if desired:
+ * <li>signer.secret.provider.zookeeper.curator.client: A CuratorFramework
+ * client object can be passed here. If given, the "zookeeper" implementation
+ * will use this Curator client instead of creating its own, which is useful if
+ * you already have a Curator client or want more control over its
+ * configuration.</li>
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Private
+public class ZKSignerSecretProvider extends RolloverSignerSecretProvider {
+
+  private static final String CONFIG_PREFIX =
+          "signer.secret.provider.zookeeper.";
+
+  /**
+   * Constant for the property that specifies the ZooKeeper connection string.
+   */
+  public static final String ZOOKEEPER_CONNECTION_STRING =
+          CONFIG_PREFIX + "connection.string";
+
+  /**
+   * Constant for the property that specifies the ZooKeeper path.
+   */
+  public static final String ZOOKEEPER_PATH = CONFIG_PREFIX + "path";
+
+  /**
+   * Constant for the property that specifies the auth type to use.  Supported
+   * values are "none" and "sasl".  The default value is "none".
+   */
+  public static final String ZOOKEEPER_AUTH_TYPE = CONFIG_PREFIX + "auth.type";
+
+  /**
+   * Constant for the property that specifies the Kerberos keytab file.
+   */
+  public static final String ZOOKEEPER_KERBEROS_KEYTAB =
+          CONFIG_PREFIX + "kerberos.keytab";
+
+  /**
+   * Constant for the property that specifies the Kerberos principal.
+   */
+  public static final String ZOOKEEPER_KERBEROS_PRINCIPAL =
+          CONFIG_PREFIX + "kerberos.principal";
+
+  /**
+   * Constant for the property that specifies whether or not the Curator client
+   * should disconnect from ZooKeeper on shutdown.  The default is "true".  Only
+   * set this to "false" if a custom Curator client is being provided and the
+   * disconnection is being handled elsewhere.
+   */
+  public static final String DISCONNECT_FROM_ZOOKEEPER_ON_SHUTDOWN =
+          CONFIG_PREFIX + "disconnect.on.shutdown";
+
+  /**
+   * Constant for the ServletContext attribute that can be used for providing a
+   * custom CuratorFramework client. If set ZKSignerSecretProvider will use this
+   * Curator client instead of creating a new one. The providing class is
+   * responsible for creating and configuring the Curator client (including
+   * security and ACLs) in this case.
+   */
+  public static final String
+      ZOOKEEPER_SIGNER_SECRET_PROVIDER_CURATOR_CLIENT_ATTRIBUTE =
+      CONFIG_PREFIX + "curator.client";
+
+  private static final String JAAS_LOGIN_ENTRY_NAME =
+          "ZKSignerSecretProviderClient";
+
+  private static Logger LOG = LoggerFactory.getLogger(
+          ZKSignerSecretProvider.class);
+  private String path;
+  /**
+   * Stores the next secret that will be used after the current one rolls over.
+   * We do this to help with rollover performance by actually deciding the next
+   * secret at the previous rollover.  This allows us to switch to the next
+   * secret very quickly.  Afterwards, we have plenty of time to decide on the
+   * next secret.
+   */
+  private volatile byte[] nextSecret;
+  private final Random rand;
+  /**
+   * Stores the current version of the znode.
+   */
+  private int zkVersion;
+  /**
+   * Stores the next date that the rollover will occur.  This is only used
+   * for allowing new servers joining later to synchronize their rollover
+   * with everyone else.
+   */
+  private long nextRolloverDate;
+  private long tokenValidity;
+  private CuratorFramework client;
+  private boolean shouldDisconnect;
+  private static int INT_BYTES = Integer.SIZE / Byte.SIZE;
+  private static int LONG_BYTES = Long.SIZE / Byte.SIZE;
+  private static int DATA_VERSION = 0;
+
+  public ZKSignerSecretProvider() {
+    super();
+    rand = new Random();
+  }
+
+  /**
+   * This constructor lets you set the seed of the Random Number Generator and
+   * is meant for testing.
+   * @param seed the seed for the random number generator
+   */
+  @VisibleForTesting
+  public ZKSignerSecretProvider(long seed) {
+    super();
+    rand = new Random(seed);
+  }
+
+  @Override
+  public void init(Properties config, ServletContext servletContext,
+          long tokenValidity) throws Exception {
+    Object curatorClientObj = servletContext.getAttribute(
+            ZOOKEEPER_SIGNER_SECRET_PROVIDER_CURATOR_CLIENT_ATTRIBUTE);
+    if (curatorClientObj != null
+            && curatorClientObj instanceof CuratorFramework) {
+      client = (CuratorFramework) curatorClientObj;
+    } else {
+      client = createCuratorClient(config);
+    }
+    this.tokenValidity = tokenValidity;
+    shouldDisconnect = Boolean.parseBoolean(
+            config.getProperty(DISCONNECT_FROM_ZOOKEEPER_ON_SHUTDOWN, "true"));
+    path = config.getProperty(ZOOKEEPER_PATH);
+    if (path == null) {
+      throw new IllegalArgumentException(ZOOKEEPER_PATH
+              + " must be specified");
+    }
+    try {
+      nextRolloverDate = System.currentTimeMillis() + tokenValidity;
+      // everyone tries to do this, only one will succeed and only when the
+      // znode doesn't already exist.  Everyone else will synchronize on the
+      // data from the znode
+      client.create().creatingParentsIfNeeded()
+              .forPath(path, generateZKData(generateRandomSecret(),
+              generateRandomSecret(), null));
+      zkVersion = 0;
+      LOG.info("Creating secret znode");
+    } catch (KeeperException.NodeExistsException nee) {
+      LOG.info("The secret znode already exists, retrieving data");
+    }
+    // Synchronize on the data from the znode
+    // passing true tells it to parse out all the data for initing
+    pullFromZK(true);
+    long initialDelay = nextRolloverDate - System.currentTimeMillis();
+    // If it's in the past, try to find the next interval that we should
+    // be using
+    if (initialDelay < 1l) {
+      int i = 1;
+      while (initialDelay < 1l) {
+        initialDelay = nextRolloverDate + tokenValidity * i
+                - System.currentTimeMillis();
+        i++;
+      }
+    }
+    super.startScheduler(initialDelay, tokenValidity);
+  }
+
+  /**
+   * Disconnects from ZooKeeper unless told not to.
+   */
+  @Override
+  public void destroy() {
+    if (shouldDisconnect && client != null) {
+      client.close();
+    }
+    super.destroy();
+  }
+
+  @Override
+  protected synchronized void rollSecret() {
+    super.rollSecret();
+    // Try to push the information to ZooKeeper with a potential next secret.
+    nextRolloverDate += tokenValidity;
+    byte[][] secrets = super.getAllSecrets();
+    pushToZK(generateRandomSecret(), secrets[0], secrets[1]);
+    // Pull info from ZooKeeper to get the decided next secret
+    // passing false tells it that we don't care about most of the data
+    pullFromZK(false);
+  }
+
+  @Override
+  protected byte[] generateNewSecret() {
+    // We simply return nextSecret because it's already been decided on
+    return nextSecret;
+  }
+
+  /**
+   * Pushes proposed data to ZooKeeper.  If a different server pushes its data
+   * first, it gives up.
+   * @param newSecret The new secret to use
+   * @param currentSecret The current secret
+   * @param previousSecret  The previous secret
+   */
+  private synchronized void pushToZK(byte[] newSecret, byte[] currentSecret,
+          byte[] previousSecret) {
+    byte[] bytes = generateZKData(newSecret, currentSecret, previousSecret);
+    try {
+      client.setData().withVersion(zkVersion).forPath(path, bytes);
+    } catch (KeeperException.BadVersionException bve) {
+      LOG.debug("Unable to push to znode; another server already did it");
+    } catch (Exception ex) {
+      LOG.error("An unexpected exception occured pushing data to ZooKeeper",
+              ex);
+    }
+  }
+
+  /**
+   * Serialize the data to attempt to push into ZooKeeper.  The format is this:
+   * <p>
+   * [DATA_VERSION, newSecretLength, newSecret, currentSecretLength, currentSecret, previousSecretLength, previousSecret, nextRolloverDate]
+   * <p>
+   * Only previousSecret can be null, in which case the format looks like this:
+   * <p>
+   * [DATA_VERSION, newSecretLength, newSecret, currentSecretLength, currentSecret, 0, nextRolloverDate]
+   * <p>
+   * @param newSecret The new secret to use
+   * @param currentSecret The current secret
+   * @param previousSecret The previous secret
+   * @return The serialized data for ZooKeeper
+   */
+  private synchronized byte[] generateZKData(byte[] newSecret,
+          byte[] currentSecret, byte[] previousSecret) {
+    int newSecretLength = newSecret.length;
+    int currentSecretLength = currentSecret.length;
+    int previousSecretLength = 0;
+    if (previousSecret != null) {
+      previousSecretLength = previousSecret.length;
+    }
+    ByteBuffer bb = ByteBuffer.allocate(INT_BYTES + INT_BYTES + newSecretLength
+        + INT_BYTES + currentSecretLength + INT_BYTES + previousSecretLength
+        + LONG_BYTES);
+    bb.putInt(DATA_VERSION);
+    bb.putInt(newSecretLength);
+    bb.put(newSecret);
+    bb.putInt(currentSecretLength);
+    bb.put(currentSecret);
+    bb.putInt(previousSecretLength);
+    if (previousSecretLength > 0) {
+      bb.put(previousSecret);
+    }
+    bb.putLong(nextRolloverDate);
+    return bb.array();
+  }
+
+  /**
+   * Pulls data from ZooKeeper.  If isInit is false, it will only parse the
+   * next secret and version.  If isInit is true, it will also parse the current
+   * and previous secrets, and the next rollover date; it will also init the
+   * secrets.  Hence, isInit should only be true on startup.
+   * @param isInit  see description above
+   */
+  private synchronized void pullFromZK(boolean isInit) {
+    try {
+      Stat stat = new Stat();
+      byte[] bytes = client.getData().storingStatIn(stat).forPath(path);
+      ByteBuffer bb = ByteBuffer.wrap(bytes);
+      int dataVersion = bb.getInt();
+      if (dataVersion > DATA_VERSION) {
+        throw new IllegalStateException("Cannot load data from ZooKeeper; it"
+                + "was written with a newer version");
+      }
+      int nextSecretLength = bb.getInt();
+      byte[] nextSecret = new byte[nextSecretLength];
+      bb.get(nextSecret);
+      this.nextSecret = nextSecret;
+      zkVersion = stat.getVersion();
+      if (isInit) {
+        int currentSecretLength = bb.getInt();
+        byte[] currentSecret = new byte[currentSecretLength];
+        bb.get(currentSecret);
+        int previousSecretLength = bb.getInt();
+        byte[] previousSecret = null;
+        if (previousSecretLength > 0) {
+          previousSecret = new byte[previousSecretLength];
+          bb.get(previousSecret);
+        }
+        super.initSecrets(currentSecret, previousSecret);
+        nextRolloverDate = bb.getLong();
+      }
+    } catch (Exception ex) {
+      LOG.error("An unexpected exception occurred while pulling data from"
+              + "ZooKeeper", ex);
+    }
+  }
+
+  private byte[] generateRandomSecret() {
+    return Long.toString(rand.nextLong()).getBytes();
+  }
+
+  /**
+   * This method creates the Curator client and connects to ZooKeeper.
+   * @param config configuration properties
+   * @return A Curator client
+   * @throws java.lang.Exception
+   */
+  protected CuratorFramework createCuratorClient(Properties config)
+          throws Exception {
+    String connectionString = config.getProperty(
+            ZOOKEEPER_CONNECTION_STRING, "localhost:2181");
+
+    RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
+    ACLProvider aclProvider;
+    String authType = config.getProperty(ZOOKEEPER_AUTH_TYPE, "none");
+    if (authType.equals("sasl")) {
+      LOG.info("Connecting to ZooKeeper with SASL/Kerberos"
+              + "and using 'sasl' ACLs");
+      String principal = setJaasConfiguration(config);
+      System.setProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY,
+              JAAS_LOGIN_ENTRY_NAME);
+      System.setProperty("zookeeper.authProvider.1",
+              "org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
+      aclProvider = new SASLOwnerACLProvider(principal);
+    } else {  // "none"
+      LOG.info("Connecting to ZooKeeper without authentication");
+      aclProvider = new DefaultACLProvider();     // open to everyone
+    }
+    CuratorFramework cf = CuratorFrameworkFactory.builder()
+            .connectString(connectionString)
+            .retryPolicy(retryPolicy)
+            .aclProvider(aclProvider)
+            .build();
+    cf.start();
+    return cf;
+  }
+
+  private String setJaasConfiguration(Properties config) throws Exception {
+    String keytabFile = config.getProperty(ZOOKEEPER_KERBEROS_KEYTAB).trim();
+    if (keytabFile == null || keytabFile.length() == 0) {
+      throw new IllegalArgumentException(ZOOKEEPER_KERBEROS_KEYTAB
+              + " must be specified");
+    }
+    String principal = config.getProperty(ZOOKEEPER_KERBEROS_PRINCIPAL)
+            .trim();
+    if (principal == null || principal.length() == 0) {
+      throw new IllegalArgumentException(ZOOKEEPER_KERBEROS_PRINCIPAL
+              + " must be specified");
+    }
+
+    // This is equivalent to writing a jaas.conf file and setting the system
+    // property, "java.security.auth.login.config", to point to it
+    JaasConfiguration jConf =
+            new JaasConfiguration(JAAS_LOGIN_ENTRY_NAME, principal, keytabFile);
+    Configuration.setConfiguration(jConf);
+    return principal.split("[/@]")[0];
+  }
+
+  /**
+   * Simple implementation of an {@link ACLProvider} that simply returns an ACL
+   * that gives all permissions only to a single principal.
+   */
+  private static class SASLOwnerACLProvider implements ACLProvider {
+
+    private final List<ACL> saslACL;
+
+    private SASLOwnerACLProvider(String principal) {
+      this.saslACL = Collections.singletonList(
+              new ACL(Perms.ALL, new Id("sasl", principal)));
+    }
+
+    @Override
+    public List<ACL> getDefaultAcl() {
+      return saslACL;
+    }
+
+    @Override
+    public List<ACL> getAclForPath(String path) {
+      return saslACL;
+    }
+  }
+
+  /**
+   * Creates a programmatic version of a jaas.conf file. This can be used
+   * instead of writing a jaas.conf file and setting the system property,
+   * "java.security.auth.login.config", to point to that file. It is meant to be
+   * used for connecting to ZooKeeper.
+   */
+  @InterfaceAudience.Private
+  public static class JaasConfiguration extends Configuration {
+
+    private static AppConfigurationEntry[] entry;
+    private String entryName;
+
+    /**
+     * Add an entry to the jaas configuration with the passed in name,
+     * principal, and keytab. The other necessary options will be set for you.
+     *
+     * @param entryName The name of the entry (e.g. "Client")
+     * @param principal The principal of the user
+     * @param keytab The location of the keytab
+     */
+    public JaasConfiguration(String entryName, String principal, String keytab) {
+      this.entryName = entryName;
+      Map<String, String> options = new HashMap<String, String>();
+      options.put("keyTab", keytab);
+      options.put("principal", principal);
+      options.put("useKeyTab", "true");
+      options.put("storeKey", "true");
+      options.put("useTicketCache", "false");
+      options.put("refreshKrb5Config", "true");
+      String jaasEnvVar = System.getenv("HADOOP_JAAS_DEBUG");
+      if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar)) {
+        options.put("debug", "true");
+      }
+      entry = new AppConfigurationEntry[]{
+                  new AppConfigurationEntry(getKrb5LoginModuleName(),
+                  AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+                  options)};
+    }
+
+    @Override
+    public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
+      return (entryName.equals(name)) ? entry : null;
+    }
+
+    private String getKrb5LoginModuleName() {
+      String krb5LoginModuleName;
+      if (System.getProperty("java.vendor").contains("IBM")) {
+        krb5LoginModuleName = "com.ibm.security.auth.module.Krb5LoginModule";
+      } else {
+        krb5LoginModuleName = "com.sun.security.auth.module.Krb5LoginModule";
+      }
+      return krb5LoginModuleName;
+    }
+  }
+}
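The signer.secret.provider.zookeeper.curator.client ServletContext attribute documented above can be exercised as in the following hedged sketch. Only the constant, ZKSignerSecretProvider, and the Curator builder calls come from this diff; CuratorClientSetup, registerCuratorClient, and the connection/retry values are illustrative assumptions:

import javax.servlet.ServletContext;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.hadoop.security.authentication.util.ZKSignerSecretProvider;

public class CuratorClientSetup {

  /** Builds a Curator client and hands it to ZKSignerSecretProvider. */
  public static CuratorFramework registerCuratorClient(ServletContext context) {
    CuratorFramework client = CuratorFrameworkFactory.builder()
        .connectString("zoo1:2181,zoo2:2181,zoo3:2181")  // example servers
        .retryPolicy(new ExponentialBackoffRetry(1000, 3))
        .build();
    client.start();
    // ZKSignerSecretProvider.init() checks this attribute first and, when it
    // holds a CuratorFramework, skips createCuratorClient(config) entirely.
    context.setAttribute(
        ZKSignerSecretProvider
            .ZOOKEEPER_SIGNER_SECRET_PROVIDER_CURATOR_CLIENT_ATTRIBUTE,
        client);
    // Since the provider does not own this client, also consider setting the
    // DISCONNECT_FROM_ZOOKEEPER_ON_SHUTDOWN property to "false" and closing
    // the client yourself.
    return client;
  }
}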

+ 142 - 6
hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm

@@ -45,14 +45,14 @@ Configuration
   * <<<[PREFIX.]type>>>: the authentication type keyword (<<<simple>>> or
     <<<kerberos>>>) or a Authentication handler implementation.

-  * <<<[PREFIX.]signature.secret>>>: The secret to SHA-sign the generated
-    authentication tokens. If a secret is not provided a random secret is
-    generated at start up time. If using multiple web application instances
-    behind a load-balancer a secret must be set for the application to work
-    properly.
+  * <<<[PREFIX.]signature.secret>>>: When <<<signer.secret.provider>>> is set to
+    <<<string>>> or not specified, this is the value for the secret used to sign
+    the HTTP cookie.

   * <<<[PREFIX.]token.validity>>>: The validity -in seconds- of the generated
-    authentication token. The default value is <<<3600>>> seconds.
+    authentication token. The default value is <<<3600>>> seconds. This is also
+    used for the rollover interval when <<<signer.secret.provider>>> is set to
+    <<<random>>> or <<<zookeeper>>>.

   * <<<[PREFIX.]cookie.domain>>>: domain to use for the HTTP cookie that stores
     the authentication token.
@@ -60,6 +60,12 @@ Configuration
   * <<<[PREFIX.]cookie.path>>>: path to use for the HTTP cookie that stores the
     authentication token.

+  * <<<signer.secret.provider>>>: indicates the name of the SignerSecretProvider
+    class to use. Possible values are: <<<string>>>, <<<random>>>,
+    <<<zookeeper>>>, or a classname. If not specified, the <<<string>>>
+    implementation will be used; and failing that, the <<<random>>>
+    implementation will be used.
+
 ** Kerberos Configuration

   <<IMPORTANT>>: A KDC must be configured and running.
@@ -239,3 +245,133 @@ Configuration
     ...
 </web-app>
 +---+
+
+** SignerSecretProvider Configuration
+
+  The SignerSecretProvider is used to provide more advanced behaviors for the
+  secret used for signing the HTTP Cookies.
+
+  These are the relevant configuration properties:
+
+    * <<<signer.secret.provider>>>: indicates the name of the
+      SignerSecretProvider class to use. Possible values are: "string",
+      "random", "zookeeper", or a classname. If not specified, the "string"
+      implementation will be used; and failing that, the "random" implementation
+      will be used.
+
+    * <<<[PREFIX.]signature.secret>>>: When <<<signer.secret.provider>>> is set
+      to <<<string>>> or not specified, this is the value for the secret used to
+      sign the HTTP cookie.
+
+    * <<<[PREFIX.]token.validity>>>: The validity -in seconds- of the generated
+      authentication token. The default value is <<<3600>>> seconds. This is
+      also used for the rollover interval when <<<signer.secret.provider>>> is
+      set to <<<random>>> or <<<zookeeper>>>.
+
+  The following configuration properties are specific to the <<<zookeeper>>>
+  implementation:
+
+    * <<<signer.secret.provider.zookeeper.connection.string>>>: Indicates the
+      ZooKeeper connection string to connect with.
+
+    * <<<signer.secret.provider.zookeeper.path>>>: Indicates the ZooKeeper path
+      to use for storing and retrieving the secrets.  All servers
+      that need to coordinate their secret should point to the same path
+
+    * <<<signer.secret.provider.zookeeper.auth.type>>>: Indicates the auth type
+      to use.  Supported values are <<<none>>> and <<<sasl>>>.  The default
+      value is <<<none>>>.
+
+    * <<<signer.secret.provider.zookeeper.kerberos.keytab>>>: Set this to the
+      path with the Kerberos keytab file.  This is only required if using
+      Kerberos.
+
+    * <<<signer.secret.provider.zookeeper.kerberos.principal>>>: Set this to the
+      Kerberos principal to use.  This only required if using Kerberos.
+
+  <<Example>>:
+
++---+
+<web-app version="2.5" xmlns="http://java.sun.com/xml/ns/javaee">
+    ...
+
+    <filter>
+        <!-- AuthenticationHandler configs not shown -->
+        <init-param>
+            <param-name>signer.secret.provider</param-name>
+            <param-value>string</param-value>
+        </init-param>
+        <init-param>
+            <param-name>signature.secret</param-name>
+            <param-value>my_secret</param-value>
+        </init-param>
+    </filter>
+
+    ...
+</web-app>
++---+
+
+  <<Example>>:
+
++---+
+<web-app version="2.5" xmlns="http://java.sun.com/xml/ns/javaee">
+    ...
+
+    <filter>
+        <!-- AuthenticationHandler configs not shown -->
+        <init-param>
+            <param-name>signer.secret.provider</param-name>
+            <param-value>random</param-value>
+        </init-param>
+        <init-param>
+            <param-name>token.validity</param-name>
+            <param-value>30</param-value>
+        </init-param>
+    </filter>
+
+    ...
+</web-app>
++---+
+
+  <<Example>>:
+
++---+
+<web-app version="2.5" xmlns="http://java.sun.com/xml/ns/javaee">
+    ...
+
+    <filter>
+        <!-- AuthenticationHandler configs not shown -->
+        <init-param>
+            <param-name>signer.secret.provider</param-name>
+            <param-value>zookeeper</param-value>
+        </init-param>
+        <init-param>
+            <param-name>token.validity</param-name>
+            <param-value>30</param-value>
+        </init-param>
+        <init-param>
+            <param-name>signer.secret.provider.zookeeper.connection.string</param-name>
+            <param-value>zoo1:2181,zoo2:2181,zoo3:2181</param-value>
+        </init-param>
+        <init-param>
+            <param-name>signer.secret.provider.zookeeper.path</param-name>
+            <param-value>/myapp/secrets</param-value>
+        </init-param>
+        <init-param>
+            <param-name>signer.secret.provider.zookeeper.use.kerberos.acls</param-name>
+            <param-value>true</param-value>
+        </init-param>
+        <init-param>
+            <param-name>signer.secret.provider.zookeeper.kerberos.keytab</param-name>
+            <param-value>/tmp/auth.keytab</param-value>
+        </init-param>
+        <init-param>
+            <param-name>signer.secret.provider.zookeeper.kerberos.principal</param-name>
+            <param-value>HTTP/localhost@LOCALHOST</param-value>
+        </init-param>
+    </filter>
+
+    ...
+</web-app>
++---+
+
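
  Beyond the servlet configuration shown above, the provider and signer classes
  that this change introduces can also be exercised directly.  The sketch below
  is illustrative only: it mirrors the updated test code elsewhere in this patch
  (the null ServletContext and the -1 token validity follow TestSigner), and the
  payload string is a made-up example.

+---+
import java.util.Properties;

import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.util.Signer;
import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;

public class SignerSketch {
  public static void main(String[] args) throws Exception {
    // Same property the "string" example above sets via <init-param>.
    Properties props = new Properties();
    props.setProperty(AuthenticationFilter.SIGNATURE_SECRET, "my_secret");

    StringSignerSecretProvider secretProvider = new StringSignerSecretProvider();
    // null ServletContext and -1 validity, as in the TestSigner helper.
    secretProvider.init(props, null, -1);

    Signer signer = new Signer(secretProvider);
    String signed = signer.sign("u=alice&t=simple");    // hypothetical payload
    String original = signer.verifyAndExtract(signed);  // throws if tampered with
    System.out.println(original);
  }
}
+---+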

+ 5 - 0
hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm

@@ -44,6 +44,11 @@ Hadoop Auth, Java HTTP SPNEGO ${project.version}
   Subsequent HTTP client requests presenting the signed HTTP Cookie have access
   to the protected resources until the HTTP Cookie expires.
 
+  The secret used to sign the HTTP Cookie can be supplied by several provider
+  implementations with different behaviors: a hardcoded secret string, a
+  rolling randomly generated secret, and a rolling randomly generated secret
+  synchronized between multiple servers using ZooKeeper.
+
 * User Documentation
 
   * {{{./Examples.html}Examples}}
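
  The summary added to the index page above corresponds to the
  SignerSecretProvider abstraction that this change threads through
  AuthenticationFilter and Signer.  As a rough, non-authoritative sketch of that
  contract (method signatures are taken from the test diffs below; treating the
  base type as subclassable with a no-op destroy() is an assumption), a trivial
  fixed-secret provider might look like this:

+---+
import java.util.Properties;
import javax.servlet.ServletContext;

import org.apache.hadoop.security.authentication.util.SignerSecretProvider;

// Illustrative only: one fixed secret and no rollover.  Assumes
// SignerSecretProvider can be subclassed, as the anonymous implementation in
// TestAuthenticationFilter suggests, and that destroy() has a no-op default.
public class FixedSecretProvider extends SignerSecretProvider {
  private volatile byte[] secret;

  @Override
  public void init(Properties config, ServletContext servletContext,
      long tokenValidity) {
    // A real provider would read its settings from 'config' and use
    // tokenValidity as its rollover interval; this sketch just fixes a value.
    secret = "fixed-demo-secret".getBytes();
  }

  @Override
  public byte[] getCurrentSecret() {
    return secret;
  }

  @Override
  public byte[][] getAllSecrets() {
    // A rolling provider would also return the previous secret here so that
    // recently issued cookies continue to verify after a rollover.
    return new byte[][]{secret};
  }
}
+---+

  The random and zookeeper providers differ mainly in that init() schedules a
  rollover on the token.validity-derived interval and getAllSecrets() returns
  the previous secret alongside the current one, which is what the new tests
  below assert.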

+ 91 - 26
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java

@@ -162,7 +162,8 @@ public class TestAuthenticationFilter {
                                  AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
                                  AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
       Assert.assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
       Assert.assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
@@ -186,7 +187,8 @@ public class TestAuthenticationFilter {
                                  AuthenticationFilter.SIGNATURE_SECRET)).elements());
                                  AuthenticationFilter.SIGNATURE_SECRET)).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
       Assert.assertFalse(filter.isRandomSecret());
       Assert.assertFalse(filter.isRandomSecret());
@@ -206,10 +208,11 @@ public class TestAuthenticationFilter {
                                  AuthenticationFilter.SIGNATURE_SECRET)).elements());
                                  AuthenticationFilter.SIGNATURE_SECRET)).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(
+          AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE)).thenReturn(
             new SignerSecretProvider() {
             new SignerSecretProvider() {
               @Override
               @Override
-              public void init(Properties config, long tokenValidity) {
+              public void init(Properties config, ServletContext servletContext,
+                      long tokenValidity) {
               }
               }
               @Override
               @Override
               public byte[] getCurrentSecret() {
               public byte[] getCurrentSecret() {
@@ -241,7 +244,8 @@ public class TestAuthenticationFilter {
                                  AuthenticationFilter.COOKIE_PATH)).elements());
                                  AuthenticationFilter.COOKIE_PATH)).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
       Assert.assertEquals(".foo.com", filter.getCookieDomain());
       Assert.assertEquals(".foo.com", filter.getCookieDomain());
@@ -265,7 +269,8 @@ public class TestAuthenticationFilter {
                         "management.operation.return")).elements());
                         "management.operation.return")).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
       Assert.assertTrue(DummyAuthenticationHandler.init);
       Assert.assertTrue(DummyAuthenticationHandler.init);
@@ -304,7 +309,8 @@ public class TestAuthenticationFilter {
               AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
               AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
 
 
       filter.init(config);
       filter.init(config);
@@ -330,7 +336,8 @@ public class TestAuthenticationFilter {
                         "management.operation.return")).elements());
                         "management.operation.return")).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
 
 
@@ -361,13 +368,20 @@ public class TestAuthenticationFilter {
                         "management.operation.return")).elements());
                         "management.operation.return")).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
 
 
       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
+      StringSignerSecretProvider secretProvider
+              = new StringSignerSecretProvider();
+      Properties secretProviderProps = new Properties();
+      secretProviderProps.setProperty(
+              AuthenticationFilter.SIGNATURE_SECRET, "secret");
+      secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
+      Signer signer = new Signer(secretProvider);
       String tokenSigned = signer.sign(token.toString());
       String tokenSigned = signer.sign(token.toString());
 
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -398,14 +412,21 @@ public class TestAuthenticationFilter {
                         "management.operation.return")).elements());
                         "management.operation.return")).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
 
 
       AuthenticationToken token =
       AuthenticationToken token =
           new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
           new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
       token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
+      StringSignerSecretProvider secretProvider
+              = new StringSignerSecretProvider();
+      Properties secretProviderProps = new Properties();
+      secretProviderProps.setProperty(
+              AuthenticationFilter.SIGNATURE_SECRET, "secret");
+      secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
+      Signer signer = new Signer(secretProvider);
       String tokenSigned = signer.sign(token.toString());
       String tokenSigned = signer.sign(token.toString());
 
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -443,13 +464,20 @@ public class TestAuthenticationFilter {
                         "management.operation.return")).elements());
                         "management.operation.return")).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
 
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
+      StringSignerSecretProvider secretProvider
+              = new StringSignerSecretProvider();
+      Properties secretProviderProps = new Properties();
+      secretProviderProps.setProperty(
+              AuthenticationFilter.SIGNATURE_SECRET, "secret");
+      secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
+      Signer signer = new Signer(secretProvider);
       String tokenSigned = signer.sign(token.toString());
       String tokenSigned = signer.sign(token.toString());
 
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -485,7 +513,8 @@ public class TestAuthenticationFilter {
                         "management.operation.return")).elements());
                         "management.operation.return")).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
 
 
@@ -538,7 +567,8 @@ public class TestAuthenticationFilter {
             ".return", "expired.token")).elements());
             ".return", "expired.token")).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
 
 
     if (withDomainPath) {
     if (withDomainPath) {
@@ -593,7 +623,13 @@ public class TestAuthenticationFilter {
         Mockito.verify(chain).doFilter(Mockito.any(ServletRequest.class),
         Mockito.verify(chain).doFilter(Mockito.any(ServletRequest.class),
                 Mockito.any(ServletResponse.class));
                 Mockito.any(ServletResponse.class));
 
 
-        Signer signer = new Signer(new StringSignerSecretProvider("secret"));
+        StringSignerSecretProvider secretProvider
+                = new StringSignerSecretProvider();
+        Properties secretProviderProps = new Properties();
+        secretProviderProps.setProperty(
+                AuthenticationFilter.SIGNATURE_SECRET, "secret");
+        secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
+        Signer signer = new Signer(secretProvider);
         String value = signer.verifyAndExtract(v);
         String value = signer.verifyAndExtract(v);
         AuthenticationToken token = AuthenticationToken.parse(value);
         AuthenticationToken token = AuthenticationToken.parse(value);
         assertThat(token.getExpires(), not(0L));
         assertThat(token.getExpires(), not(0L));
@@ -662,7 +698,8 @@ public class TestAuthenticationFilter {
                         "management.operation.return")).elements());
                         "management.operation.return")).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
 
 
@@ -671,7 +708,13 @@ public class TestAuthenticationFilter {
 
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "t");
       AuthenticationToken token = new AuthenticationToken("u", "p", "t");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
+      StringSignerSecretProvider secretProvider
+              = new StringSignerSecretProvider();
+      Properties secretProviderProps = new Properties();
+      secretProviderProps.setProperty(
+              AuthenticationFilter.SIGNATURE_SECRET, "secret");
+      secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
+      Signer signer = new Signer(secretProvider);
       String tokenSigned = signer.sign(token.toString());
       String tokenSigned = signer.sign(token.toString());
 
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -716,7 +759,8 @@ public class TestAuthenticationFilter {
                         "management.operation.return")).elements());
                         "management.operation.return")).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
 
 
@@ -783,7 +827,8 @@ public class TestAuthenticationFilter {
                         "management.operation.return")).elements());
                         "management.operation.return")).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
 
 
@@ -792,7 +837,13 @@ public class TestAuthenticationFilter {
 
 
       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
       token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer(new StringSignerSecretProvider(secret));
+      StringSignerSecretProvider secretProvider
+              = new StringSignerSecretProvider();
+      Properties secretProviderProps = new Properties();
+      secretProviderProps.setProperty(
+              AuthenticationFilter.SIGNATURE_SECRET, secret);
+      secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
+      Signer signer = new Signer(secretProvider);
       String tokenSigned = signer.sign(token.toString());
       String tokenSigned = signer.sign(token.toString());
 
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -854,7 +905,8 @@ public class TestAuthenticationFilter {
                         "management.operation.return")).elements());
                         "management.operation.return")).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
 
 
@@ -863,7 +915,13 @@ public class TestAuthenticationFilter {
 
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer(new StringSignerSecretProvider(secret));
+      StringSignerSecretProvider secretProvider
+              = new StringSignerSecretProvider();
+      Properties secretProviderProps = new Properties();
+      secretProviderProps.setProperty(
+              AuthenticationFilter.SIGNATURE_SECRET, secret);
+      secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
+      Signer signer = new Signer(secretProvider);
       String tokenSigned = signer.sign(token.toString());
       String tokenSigned = signer.sign(token.toString());
 
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -893,7 +951,8 @@ public class TestAuthenticationFilter {
                         "management.operation.return")).elements());
                         "management.operation.return")).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
       ServletContext context = Mockito.mock(ServletContext.class);
       Mockito.when(context.getAttribute(
       Mockito.when(context.getAttribute(
-          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+              .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       filter.init(config);
 
 
@@ -914,7 +973,13 @@ public class TestAuthenticationFilter {
 
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "t");
       AuthenticationToken token = new AuthenticationToken("u", "p", "t");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
+      StringSignerSecretProvider secretProvider
+              = new StringSignerSecretProvider();
+      Properties secretProviderProps = new Properties();
+      secretProviderProps.setProperty(
+              AuthenticationFilter.SIGNATURE_SECRET, "secret");
+      secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
+      Signer signer = new Signer(secretProvider);
       String tokenSigned = signer.sign(token.toString());
       String tokenSigned = signer.sign(token.toString());
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
       Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
       Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});

+ 55 - 0
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestJaasConfiguration.java

@@ -0,0 +1,55 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.util.Map;
+import javax.security.auth.login.AppConfigurationEntry;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestJaasConfiguration {
+
+  // We won't test actually using it to authenticate because that gets messy and
+  // may conflict with other tests; but we can test that it otherwise behaves
+  // correctly
+  @Test
+  public void test() throws Exception {
+    String krb5LoginModuleName;
+    if (System.getProperty("java.vendor").contains("IBM")) {
+      krb5LoginModuleName = "com.ibm.security.auth.module.Krb5LoginModule";
+    } else {
+      krb5LoginModuleName = "com.sun.security.auth.module.Krb5LoginModule";
+    }
+
+    ZKSignerSecretProvider.JaasConfiguration jConf =
+            new ZKSignerSecretProvider.JaasConfiguration("foo", "foo/localhost",
+            "/some/location/foo.keytab");
+    AppConfigurationEntry[] entries = jConf.getAppConfigurationEntry("bar");
+    Assert.assertNull(entries);
+    entries = jConf.getAppConfigurationEntry("foo");
+    Assert.assertEquals(1, entries.length);
+    AppConfigurationEntry entry = entries[0];
+    Assert.assertEquals(AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+            entry.getControlFlag());
+    Assert.assertEquals(krb5LoginModuleName, entry.getLoginModuleName());
+    Map<String, ?> options = entry.getOptions();
+    Assert.assertEquals("/some/location/foo.keytab", options.get("keyTab"));
+    Assert.assertEquals("foo/localhost", options.get("principal"));
+    Assert.assertEquals("true", options.get("useKeyTab"));
+    Assert.assertEquals("true", options.get("storeKey"));
+    Assert.assertEquals("false", options.get("useTicketCache"));
+    Assert.assertEquals("true", options.get("refreshKrb5Config"));
+    Assert.assertEquals(6, options.size());
+  }
+}

+ 1 - 1
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java

@@ -31,7 +31,7 @@ public class TestRandomSignerSecretProvider {
     RandomSignerSecretProvider secretProvider =
         new RandomSignerSecretProvider(seed);
     try {
-      secretProvider.init(null, rolloverFrequency);
+      secretProvider.init(null, null, rolloverFrequency);
 
       byte[] currentSecret = secretProvider.getCurrentSecret();
       byte[][] allSecrets = secretProvider.getAllSecrets();

+ 1 - 1
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRolloverSignerSecretProvider.java

@@ -28,7 +28,7 @@ public class TestRolloverSignerSecretProvider {
         new TRolloverSignerSecretProvider(
             new byte[][]{secret1, secret2, secret3});
     try {
-      secretProvider.init(null, rolloverFrequency);
+      secretProvider.init(null, null, rolloverFrequency);
 
       byte[] currentSecret = secretProvider.getCurrentSecret();
       byte[][] allSecrets = secretProvider.getAllSecrets();

+ 17 - 6
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java

@@ -14,6 +14,8 @@
 package org.apache.hadoop.security.authentication.util;
 
 import java.util.Properties;
+import javax.servlet.ServletContext;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -21,7 +23,7 @@ public class TestSigner {
 
   @Test
   public void testNullAndEmptyString() throws Exception {
-    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
+    Signer signer = new Signer(createStringSignerSecretProvider());
     try {
       signer.sign(null);
       Assert.fail();
@@ -42,7 +44,7 @@ public class TestSigner {
 
   @Test
   public void testSignature() throws Exception {
-    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
+    Signer signer = new Signer(createStringSignerSecretProvider());
     String s1 = signer.sign("ok");
     String s2 = signer.sign("ok");
     String s3 = signer.sign("wrong");
@@ -52,7 +54,7 @@ public class TestSigner {
 
   @Test
   public void testVerify() throws Exception {
-    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
+    Signer signer = new Signer(createStringSignerSecretProvider());
     String t = "test";
     String s = signer.sign(t);
     String e = signer.verifyAndExtract(s);
@@ -61,7 +63,7 @@ public class TestSigner {
 
   @Test
   public void testInvalidSignedText() throws Exception {
-    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
+    Signer signer = new Signer(createStringSignerSecretProvider());
     try {
       signer.verifyAndExtract("test");
       Assert.fail();
@@ -74,7 +76,7 @@ public class TestSigner {
 
   @Test
   public void testTampering() throws Exception {
-    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
+    Signer signer = new Signer(createStringSignerSecretProvider());
     String t = "test";
     String s = signer.sign(t);
     s += "x";
@@ -88,6 +90,14 @@ public class TestSigner {
     }
   }
 
+  private StringSignerSecretProvider createStringSignerSecretProvider() throws Exception {
+      StringSignerSecretProvider secretProvider = new StringSignerSecretProvider();
+      Properties secretProviderProps = new Properties();
+      secretProviderProps.setProperty(AuthenticationFilter.SIGNATURE_SECRET, "secret");
+      secretProvider.init(secretProviderProps, null, -1);
+      return secretProvider;
+  }
+
   @Test
   public void testMultipleSecrets() throws Exception {
     TestSignerSecretProvider secretProvider = new TestSignerSecretProvider();
@@ -128,7 +138,8 @@ public class TestSigner {
     private byte[] previousSecret;
 
     @Override
-    public void init(Properties config, long tokenValidity) {
+    public void init(Properties config, ServletContext servletContext,
+            long tokenValidity) {
     }
 
     @Override

+ 7 - 2
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestStringSignerSecretProvider.java

@@ -13,6 +13,8 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
+import java.util.Properties;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -22,8 +24,11 @@ public class TestStringSignerSecretProvider {
   public void testGetSecrets() throws Exception {
     String secretStr = "secret";
     StringSignerSecretProvider secretProvider
-        = new StringSignerSecretProvider(secretStr);
-    secretProvider.init(null, -1);
+            = new StringSignerSecretProvider();
+    Properties secretProviderProps = new Properties();
+    secretProviderProps.setProperty(
+            AuthenticationFilter.SIGNATURE_SECRET, "secret");
+    secretProvider.init(secretProviderProps, null, -1);
     byte[] secretBytes = secretStr.getBytes();
     Assert.assertArrayEquals(secretBytes, secretProvider.getCurrentSecret());
     byte[][] allSecrets = secretProvider.getAllSecrets();

+ 270 - 0
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java

@@ -0,0 +1,270 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.util.Arrays;
+import java.util.Properties;
+import java.util.Random;
+import javax.servlet.ServletContext;
+import org.apache.curator.test.TestingServer;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+public class TestZKSignerSecretProvider {
+
+  private TestingServer zkServer;
+
+  @Before
+  public void setup() throws Exception {
+    zkServer = new TestingServer();
+  }
+
+  @After
+  public void teardown() throws Exception {
+    if (zkServer != null) {
+      zkServer.stop();
+      zkServer.close();
+    }
+  }
+
+  @Test
+  // Test just one ZKSignerSecretProvider to verify that it works in the
+  // simplest case
+  public void testOne() throws Exception {
+    long rolloverFrequency = 15 * 1000; // rollover every 15 sec
+    // use the same seed so we can predict the RNG
+    long seed = System.currentTimeMillis();
+    Random rand = new Random(seed);
+    byte[] secret2 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secret1 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secret3 = Long.toString(rand.nextLong()).getBytes();
+    ZKSignerSecretProvider secretProvider = new ZKSignerSecretProvider(seed);
+    Properties config = new Properties();
+    config.setProperty(
+        ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING,
+        zkServer.getConnectString());
+    config.setProperty(ZKSignerSecretProvider.ZOOKEEPER_PATH,
+        "/secret");
+    try {
+      secretProvider.init(config, getDummyServletContext(), rolloverFrequency);
+
+      byte[] currentSecret = secretProvider.getCurrentSecret();
+      byte[][] allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret1, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret1, allSecrets[0]);
+      Assert.assertNull(allSecrets[1]);
+      Thread.sleep((rolloverFrequency + 2000));
+
+      currentSecret = secretProvider.getCurrentSecret();
+      allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret2, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret2, allSecrets[0]);
+      Assert.assertArrayEquals(secret1, allSecrets[1]);
+      Thread.sleep((rolloverFrequency + 2000));
+
+      currentSecret = secretProvider.getCurrentSecret();
+      allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret3, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret3, allSecrets[0]);
+      Assert.assertArrayEquals(secret2, allSecrets[1]);
+      Thread.sleep((rolloverFrequency + 2000));
+    } finally {
+      secretProvider.destroy();
+    }
+  }
+
+  @Test
+  public void testMultipleInit() throws Exception {
+    long rolloverFrequency = 15 * 1000; // rollover every 15 sec
+    // use the same seed so we can predict the RNG
+    long seedA = System.currentTimeMillis();
+    Random rand = new Random(seedA);
+    byte[] secretA2 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secretA1 = Long.toString(rand.nextLong()).getBytes();
+    // use the same seed so we can predict the RNG
+    long seedB = System.currentTimeMillis() + rand.nextLong();
+    rand = new Random(seedB);
+    byte[] secretB2 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secretB1 = Long.toString(rand.nextLong()).getBytes();
+    // use the same seed so we can predict the RNG
+    long seedC = System.currentTimeMillis() + rand.nextLong();
+    rand = new Random(seedC);
+    byte[] secretC2 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secretC1 = Long.toString(rand.nextLong()).getBytes();
+    ZKSignerSecretProvider secretProviderA = new ZKSignerSecretProvider(seedA);
+    ZKSignerSecretProvider secretProviderB = new ZKSignerSecretProvider(seedB);
+    ZKSignerSecretProvider secretProviderC = new ZKSignerSecretProvider(seedC);
+    Properties config = new Properties();
+    config.setProperty(
+        ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING,
+        zkServer.getConnectString());
+    config.setProperty(ZKSignerSecretProvider.ZOOKEEPER_PATH,
+        "/secret");
+    try {
+      secretProviderA.init(config, getDummyServletContext(), rolloverFrequency);
+      secretProviderB.init(config, getDummyServletContext(), rolloverFrequency);
+      secretProviderC.init(config, getDummyServletContext(), rolloverFrequency);
+
+      byte[] currentSecretA = secretProviderA.getCurrentSecret();
+      byte[][] allSecretsA = secretProviderA.getAllSecrets();
+      byte[] currentSecretB = secretProviderB.getCurrentSecret();
+      byte[][] allSecretsB = secretProviderB.getAllSecrets();
+      byte[] currentSecretC = secretProviderC.getCurrentSecret();
+      byte[][] allSecretsC = secretProviderC.getAllSecrets();
+      Assert.assertArrayEquals(currentSecretA, currentSecretB);
+      Assert.assertArrayEquals(currentSecretB, currentSecretC);
+      Assert.assertEquals(2, allSecretsA.length);
+      Assert.assertEquals(2, allSecretsB.length);
+      Assert.assertEquals(2, allSecretsC.length);
+      Assert.assertArrayEquals(allSecretsA[0], allSecretsB[0]);
+      Assert.assertArrayEquals(allSecretsB[0], allSecretsC[0]);
+      Assert.assertNull(allSecretsA[1]);
+      Assert.assertNull(allSecretsB[1]);
+      Assert.assertNull(allSecretsC[1]);
+      char secretChosen = 'z';
+      if (Arrays.equals(secretA1, currentSecretA)) {
+        Assert.assertArrayEquals(secretA1, allSecretsA[0]);
+        secretChosen = 'A';
+      } else if (Arrays.equals(secretB1, currentSecretB)) {
+        Assert.assertArrayEquals(secretB1, allSecretsA[0]);
+        secretChosen = 'B';
+      } else if (Arrays.equals(secretC1, currentSecretC)) {
+        Assert.assertArrayEquals(secretC1, allSecretsA[0]);
+        secretChosen = 'C';
+      } else {
+        Assert.fail("It appears that they all agreed on the same secret, but "
+                + "not one of the secrets they were supposed to");
+      }
+      Thread.sleep((rolloverFrequency + 2000));
+
+      currentSecretA = secretProviderA.getCurrentSecret();
+      allSecretsA = secretProviderA.getAllSecrets();
+      currentSecretB = secretProviderB.getCurrentSecret();
+      allSecretsB = secretProviderB.getAllSecrets();
+      currentSecretC = secretProviderC.getCurrentSecret();
+      allSecretsC = secretProviderC.getAllSecrets();
+      Assert.assertArrayEquals(currentSecretA, currentSecretB);
+      Assert.assertArrayEquals(currentSecretB, currentSecretC);
+      Assert.assertEquals(2, allSecretsA.length);
+      Assert.assertEquals(2, allSecretsB.length);
+      Assert.assertEquals(2, allSecretsC.length);
+      Assert.assertArrayEquals(allSecretsA[0], allSecretsB[0]);
+      Assert.assertArrayEquals(allSecretsB[0], allSecretsC[0]);
+      Assert.assertArrayEquals(allSecretsA[1], allSecretsB[1]);
+      Assert.assertArrayEquals(allSecretsB[1], allSecretsC[1]);
+      // The second secret used is prechosen by whoever won the init; so it
+      // should match with whichever we saw before
+      if (secretChosen == 'A') {
+        Assert.assertArrayEquals(secretA2, currentSecretA);
+      } else if (secretChosen == 'B') {
+        Assert.assertArrayEquals(secretB2, currentSecretA);
+      } else if (secretChosen == 'C') {
+        Assert.assertArrayEquals(secretC2, currentSecretA);
+      }
+    } finally {
+      secretProviderC.destroy();
+      secretProviderB.destroy();
+      secretProviderA.destroy();
+    }
+  }
+
+  @Test
+  public void testMultipleUnsynchronized() throws Exception {
+    long rolloverFrequency = 15 * 1000; // rollover every 15 sec
+    // use the same seed so we can predict the RNG
+    long seedA = System.currentTimeMillis();
+    Random rand = new Random(seedA);
+    byte[] secretA2 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secretA1 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secretA3 = Long.toString(rand.nextLong()).getBytes();
+    // use the same seed so we can predict the RNG
+    long seedB = System.currentTimeMillis() + rand.nextLong();
+    rand = new Random(seedB);
+    byte[] secretB2 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secretB1 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secretB3 = Long.toString(rand.nextLong()).getBytes();
+    ZKSignerSecretProvider secretProviderA = new ZKSignerSecretProvider(seedA);
+    ZKSignerSecretProvider secretProviderB = new ZKSignerSecretProvider(seedB);
+    Properties config = new Properties();
+    config.setProperty(
+        ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING,
+        zkServer.getConnectString());
+    config.setProperty(ZKSignerSecretProvider.ZOOKEEPER_PATH,
+        "/secret");
+    try {
+      secretProviderA.init(config, getDummyServletContext(), rolloverFrequency);
+
+      byte[] currentSecretA = secretProviderA.getCurrentSecret();
+      byte[][] allSecretsA = secretProviderA.getAllSecrets();
+      Assert.assertArrayEquals(secretA1, currentSecretA);
+      Assert.assertEquals(2, allSecretsA.length);
+      Assert.assertArrayEquals(secretA1, allSecretsA[0]);
+      Assert.assertNull(allSecretsA[1]);
+      Thread.sleep((rolloverFrequency + 2000));
+
+      currentSecretA = secretProviderA.getCurrentSecret();
+      allSecretsA = secretProviderA.getAllSecrets();
+      Assert.assertArrayEquals(secretA2, currentSecretA);
+      Assert.assertEquals(2, allSecretsA.length);
+      Assert.assertArrayEquals(secretA2, allSecretsA[0]);
+      Assert.assertArrayEquals(secretA1, allSecretsA[1]);
+      Thread.sleep((rolloverFrequency / 5));
+
+      secretProviderB.init(config, getDummyServletContext(), rolloverFrequency);
+
+      byte[] currentSecretB = secretProviderB.getCurrentSecret();
+      byte[][] allSecretsB = secretProviderB.getAllSecrets();
+      Assert.assertArrayEquals(secretA2, currentSecretB);
+      Assert.assertEquals(2, allSecretsA.length);
+      Assert.assertArrayEquals(secretA2, allSecretsB[0]);
+      Assert.assertArrayEquals(secretA1, allSecretsB[1]);
+      Thread.sleep((rolloverFrequency));
+
+      currentSecretA = secretProviderA.getCurrentSecret();
+      allSecretsA = secretProviderA.getAllSecrets();
+      currentSecretB = secretProviderB.getCurrentSecret();
+      allSecretsB = secretProviderB.getAllSecrets();
+      Assert.assertArrayEquals(currentSecretA, currentSecretB);
+      Assert.assertEquals(2, allSecretsA.length);
+      Assert.assertEquals(2, allSecretsB.length);
+      Assert.assertArrayEquals(allSecretsA[0], allSecretsB[0]);
+      Assert.assertArrayEquals(allSecretsA[1], allSecretsB[1]);
+      if (Arrays.equals(secretA3, currentSecretA)) {
+        Assert.assertArrayEquals(secretA3, allSecretsA[0]);
+      } else if (Arrays.equals(secretB3, currentSecretB)) {
+        Assert.assertArrayEquals(secretB3, allSecretsA[0]);
+      } else {
+        Assert.fail("It appears that they all agreed on the same secret, but "
+                + "not one of the secrets they were supposed to");
+      }
+    } finally {
+      secretProviderB.destroy();
+      secretProviderA.destroy();
+    }
+  }
+
+  private ServletContext getDummyServletContext() {
+    ServletContext servletContext = Mockito.mock(ServletContext.class);
+    Mockito.when(servletContext.getAttribute(ZKSignerSecretProvider
+            .ZOOKEEPER_SIGNER_SECRET_PROVIDER_CURATOR_CLIENT_ATTRIBUTE))
+            .thenReturn(null);
+    return servletContext;
+  }
+}

+ 16 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -325,6 +325,11 @@ Trunk (Unreleased)
     HADOOP-11052. hadoop_verify_secure_prereq's results aren't checked 
     in bin/hdfs (aw)
 
+    HADOOP-11055. non-daemon pid files are missing (aw)
+
+    HADOOP-11022. User replaced functions get lost 2-3 levels deep (e.g., 
+    sbin) (aw)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -517,6 +522,14 @@ Release 2.6.0 - UNRELEASED
     HADOOP-11074. Move s3-related FS connector code to hadoop-aws (David S.
     Wang via Colin Patrick McCabe)
 
+    HADOOP-11091. Eliminate old configuration parameter names from s3a (David
+    S. Wang via Colin Patrick McCabe)
+
+    HADOOP-10868. AuthenticationFilter should support externalizing the 
+    secret for signing and provide rotation support. (rkanter via tucu)
+
+    HADOOP-10922. User documentation for CredentialShell. (Larry McCay via wang)
+
   OPTIMIZATIONS
 
     HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
@@ -708,6 +721,9 @@ Release 2.6.0 - UNRELEASED
     HADOOP-11056. OsSecureRandom.setConf() might leak file descriptors (yzhang
     via cmccabe)
 
+    HDFS-6912. SharedFileDescriptorFactory should not allocate sparse files
+    (cmccabe)
+
     BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
       HADOOP-10734. Implement high-performance secure random number sources.

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

@@ -156,6 +156,7 @@ done
 
 
 hadoop_find_confdir
 hadoop_exec_hadoopenv
 
 
 #
 # IMPORTANT! User provided code is now available!
+ 90 - 37
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

@@ -104,6 +104,15 @@ function hadoop_exec_hadoopenv
   fi
   fi
 }
 }
 
 
+function hadoop_exec_userfuncs
+{
+  # NOTE: This function is not user replaceable.
+
+  if [[ -e "${HADOOP_CONF_DIR}/hadoop-user-functions.sh" ]]; then
+    . "${HADOOP_CONF_DIR}/hadoop-user-functions.sh"
+  fi
+}
+
 function hadoop_basic_init
 function hadoop_basic_init
 {
 {
   # Some of these are also set in hadoop-env.sh.
   # Some of these are also set in hadoop-env.sh.
@@ -645,7 +654,7 @@ function hadoop_verify_secure_prereq
   
   
   # ${EUID} comes from the shell itself!
   # ${EUID} comes from the shell itself!
   if [[ "${EUID}" -ne 0 ]] && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
   if [[ "${EUID}" -ne 0 ]] && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
-    hadoop_error "ERROR: You must be a privileged in order to run a secure serice."
+    hadoop_error "ERROR: You must be a privileged user in order to run a secure service."
     exit 1
     exit 1
   else
   else
     return 0
     return 0
@@ -704,7 +713,8 @@ function hadoop_verify_logdir
   rm "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
   rm "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
 }
 }
 
 
-function hadoop_status_daemon() {
+function hadoop_status_daemon() 
+{
   #
   #
   # LSB 4.1.0 compatible status command (1)
   # LSB 4.1.0 compatible status command (1)
   #
   #
@@ -760,11 +770,19 @@ function hadoop_start_daemon
   # so complex! so wow! much java!
   # so complex! so wow! much java!
   local command=$1
   local command=$1
   local class=$2
   local class=$2
-  shift 2
+  local pidfile=$3
+  shift 3
 
 
   hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
   hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
   hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
   hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
 
 
+  # this is for the non-daemon pid creation
+  #shellcheck disable=SC2086
+  echo $$ > "${pidfile}" 2>/dev/null
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR:  Cannot write ${command} pid ${pidfile}."
+  fi
+
   export CLASSPATH
   export CLASSPATH
   #shellcheck disable=SC2086
   #shellcheck disable=SC2086
   exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
   exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
@@ -779,27 +797,42 @@ function hadoop_start_daemon_wrapper
   local pidfile=$3
   local pidfile=$3
   local outfile=$4
   local outfile=$4
   shift 4
   shift 4
-  
+
+  local counter
+
   hadoop_rotate_log "${outfile}"
   hadoop_rotate_log "${outfile}"
   
   
   hadoop_start_daemon "${daemonname}" \
   hadoop_start_daemon "${daemonname}" \
-  "$class" "$@" >> "${outfile}" 2>&1 < /dev/null &
+    "$class" \
+    "${pidfile}" \
+    "$@" >> "${outfile}" 2>&1 < /dev/null &
+
+  # we need to avoid a race condition here
+  # so let's wait for the fork to finish 
+  # before overriding with the daemonized pid
+  (( counter=0 ))
+  while [[ ! -f ${pidfile} && ${counter} -le 5 ]]; do
+    sleep 1
+    (( counter++ ))
+  done
+
+  # this is for daemon pid creation
   #shellcheck disable=SC2086
   #shellcheck disable=SC2086
   echo $! > "${pidfile}" 2>/dev/null
   echo $! > "${pidfile}" 2>/dev/null
   if [[ $? -gt 0 ]]; then
   if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR:  Cannot write pid ${pidfile}."
+    hadoop_error "ERROR:  Cannot write ${daemonname} pid ${pidfile}."
   fi
   fi
   
   
   # shellcheck disable=SC2086
   # shellcheck disable=SC2086
   renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
   renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
   if [[ $? -gt 0 ]]; then
   if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR: Cannot set priority of process $!"
+    hadoop_error "ERROR: Cannot set priority of ${daemoname} process $!"
   fi
   fi
   
   
   # shellcheck disable=SC2086
   # shellcheck disable=SC2086
-  disown $! 2>&1
+  disown %+ >/dev/null 2>&1
   if [[ $? -gt 0 ]]; then
   if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR: Cannot disconnect process $!"
+    hadoop_error "ERROR: Cannot disconnect ${daemoname} process $!"
   fi
   fi
   sleep 1
   sleep 1
   
   
@@ -829,7 +862,8 @@ function hadoop_start_secure_daemon
   
   
   # where to send stderr.  same thing, except &2 = stderr
   # where to send stderr.  same thing, except &2 = stderr
   local daemonerrfile=$5
   local daemonerrfile=$5
-  shift 5
+  local privpidfile=$6
+  shift 6
  
  
   hadoop_rotate_log "${daemonoutfile}"
   hadoop_rotate_log "${daemonoutfile}"
   hadoop_rotate_log "${daemonerrfile}"
   hadoop_rotate_log "${daemonerrfile}"
@@ -849,17 +883,23 @@ function hadoop_start_secure_daemon
 
 
   hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
   hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
   hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
   hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
+
+  #shellcheck disable=SC2086
+  echo $$ > "${privpidfile}" 2>/dev/null
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR:  Cannot write ${daemoname} pid ${privpidfile}."
+  fi
   
   
   exec "${jsvc}" \
   exec "${jsvc}" \
-  "-Dproc_${daemonname}" \
-  -outfile "${daemonoutfile}" \
-  -errfile "${daemonerrfile}" \
-  -pidfile "${daemonpidfile}" \
-  -nodetach \
-  -user "${HADOOP_SECURE_USER}" \
-  -cp "${CLASSPATH}" \
-  ${HADOOP_OPTS} \
-  "${class}" "$@"
+    "-Dproc_${daemonname}" \
+    -outfile "${daemonoutfile}" \
+    -errfile "${daemonerrfile}" \
+    -pidfile "${daemonpidfile}" \
+    -nodetach \
+    -user "${HADOOP_SECURE_USER}" \
+    -cp "${CLASSPATH}" \
+    ${HADOOP_OPTS} \
+    "${class}" "$@"
 }
 }
 
 
 function hadoop_start_secure_daemon_wrapper
 function hadoop_start_secure_daemon_wrapper
@@ -886,39 +926,52 @@ function hadoop_start_secure_daemon_wrapper
   
   
   local daemonerrfile=$7
   local daemonerrfile=$7
   shift 7
   shift 7
+
+  local counter
   
   
   hadoop_rotate_log "${jsvcoutfile}"
   hadoop_rotate_log "${jsvcoutfile}"
   
   
   hadoop_start_secure_daemon \
   hadoop_start_secure_daemon \
-  "${daemonname}" \
-  "${class}" \
-  "${daemonpidfile}" \
-  "${daemonoutfile}" \
-  "${daemonerrfile}" "$@" >> "${jsvcoutfile}" 2>&1 < /dev/null &
-  
-  # This wrapper should only have one child.  Unlike Shawty Lo.
+    "${daemonname}" \
+    "${class}" \
+    "${daemonpidfile}" \
+    "${daemonoutfile}" \
+    "${daemonerrfile}" \
+    "${jsvcpidfile}"  "$@" >> "${jsvcoutfile}" 2>&1 < /dev/null &
+
+  # we need to avoid a race condition here
+  # so let's wait for the fork to finish 
+  # before overriding with the daemonized pid
+  (( counter=0 ))
+  while [[ ! -f ${jsvcpidfile} && ${counter} -le 5 ]]; do
+    sleep 1
+    (( counter++ ))
+  done
+
+  # this is for the daemon pid creation
   #shellcheck disable=SC2086
   #shellcheck disable=SC2086
   echo $! > "${jsvcpidfile}" 2>/dev/null
   echo $! > "${jsvcpidfile}" 2>/dev/null
   if [[ $? -gt 0 ]]; then
   if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR:  Cannot write pid ${pidfile}."
+    hadoop_error "ERROR:  Cannot write ${daemonname} pid ${pidfile}."
   fi
   fi
+  
   sleep 1
   sleep 1
   #shellcheck disable=SC2086
   #shellcheck disable=SC2086
   renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
   renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
   if [[ $? -gt 0 ]]; then
   if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR: Cannot set priority of process $!"
+    hadoop_error "ERROR: Cannot set priority of ${daemonname} process $!"
   fi
   fi
   if [[ -f "${daemonpidfile}" ]]; then
   if [[ -f "${daemonpidfile}" ]]; then
     #shellcheck disable=SC2046
     #shellcheck disable=SC2046
-    renice "${HADOOP_NICENESS}" $(cat "${daemonpidfile}") >/dev/null 2>&1
+    renice "${HADOOP_NICENESS}" $(cat "${daemonpidfile}" 2>/dev/null) >/dev/null 2>&1
     if [[ $? -gt 0 ]]; then
     if [[ $? -gt 0 ]]; then
-      hadoop_error "ERROR: Cannot set priority of process $(cat "${daemonpidfile}")"
+      hadoop_error "ERROR: Cannot set priority of ${daemonname} process $(cat "${daemonpidfile}" 2>/dev/null)"
     fi
     fi
   fi
   fi
-  #shellcheck disable=SC2086
-  disown $! 2>&1
+  #shellcheck disable=SC2046
+  disown %+ >/dev/null 2>&1
   if [[ $? -gt 0 ]]; then
   if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR: Cannot disconnect process $!"
+    hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!"
   fi
   fi
   # capture the ulimit output
   # capture the ulimit output
   su "${HADOOP_SECURE_USER}" -c 'bash -c "ulimit -a"' >> "${jsvcoutfile}" 2>&1
   su "${HADOOP_SECURE_USER}" -c 'bash -c "ulimit -a"' >> "${jsvcoutfile}" 2>&1
@@ -994,7 +1047,7 @@ function hadoop_daemon_handler
       hadoop_verify_logdir
       hadoop_verify_logdir
       hadoop_status_daemon "${daemon_pidfile}"
       hadoop_status_daemon "${daemon_pidfile}"
       if [[ $? == 0  ]]; then
       if [[ $? == 0  ]]; then
-        hadoop_error "${daemonname} running as process $(cat "${daemon_pidfile}").  Stop it first."
+        hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}").  Stop it first."
         exit 1
         exit 1
       else
       else
         # stale pid file, so just remove it and continue on
         # stale pid file, so just remove it and continue on
@@ -1003,7 +1056,7 @@ function hadoop_daemon_handler
      ##COMPAT  - differentiate between --daemon start and nothing
      ##COMPAT  - differentiate between --daemon start and nothing
       # "nothing" shouldn't detach
       # "nothing" shouldn't detach
       if [[ "$daemonmode" = "default" ]]; then
       if [[ "$daemonmode" = "default" ]]; then
-        hadoop_start_daemon "${daemonname}" "${class}" "$@"
+        hadoop_start_daemon "${daemonname}" "${class}" "${daemon_pidfile}" "$@"
       else
       else
         hadoop_start_daemon_wrapper "${daemonname}" \
         hadoop_start_daemon_wrapper "${daemonname}" \
         "${class}" "${daemon_pidfile}" "${daemon_outfile}" "$@"
         "${class}" "${daemon_pidfile}" "${daemon_outfile}" "$@"
@@ -1042,7 +1095,7 @@ function hadoop_secure_daemon_handler
       hadoop_verify_logdir
       hadoop_verify_logdir
       hadoop_status_daemon "${daemon_pidfile}"
       hadoop_status_daemon "${daemon_pidfile}"
       if [[ $? == 0  ]]; then
       if [[ $? == 0  ]]; then
-        hadoop_error "${daemonname} running as process $(cat "${daemon_pidfile}").  Stop it first."
+        hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}").  Stop it first."
         exit 1
         exit 1
       else
       else
         # stale pid file, so just remove it and continue on
         # stale pid file, so just remove it and continue on
@@ -1054,7 +1107,7 @@ function hadoop_secure_daemon_handler
       if [[ "${daemonmode}" = "default" ]]; then
       if [[ "${daemonmode}" = "default" ]]; then
         hadoop_start_secure_daemon "${daemonname}" "${classname}" \
         hadoop_start_secure_daemon "${daemonname}" "${classname}" \
         "${daemon_pidfile}" "${daemon_outfile}" \
         "${daemon_pidfile}" "${daemon_outfile}" \
-        "${priv_errfile}"  "$@"
+        "${priv_errfile}" "${priv_pidfile}" "$@"
       else
       else
         hadoop_start_secure_daemon_wrapper "${daemonname}" "${classname}" \
         hadoop_start_secure_daemon_wrapper "${daemonname}" "${classname}" \
         "${daemon_pidfile}" "${daemon_outfile}" \
         "${daemon_pidfile}" "${daemon_outfile}" \

+ 0 - 81
hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh

@@ -346,84 +346,3 @@ esac
 # via this special env var:
 # via this special env var:
 # HADOOP_ENABLE_BUILD_PATHS="true"
 # HADOOP_ENABLE_BUILD_PATHS="true"
 
 
-# You can do things like replace parts of the shell underbelly.
-# Most of this code is in hadoop-functions.sh.
-#
-#
-# For example, if you want to add compression to the rotation
-# menthod for the .out files that daemons generate, you can do
-# that by redefining the hadoop_rotate_log function by
-# uncommenting this code block:
-
-#function hadoop_rotate_log
-#{
-#  #
-#  # log rotation (mainly used for .out files)
-#  # Users are likely to replace this one for something
-#  # that gzips or uses dates or who knows what.
-#  #
-#  # be aware that &1 and &2 might go through here
-#  # so don't do anything too crazy...
-#  #
-#  local log=$1;
-#  local num=${2:-5};
-#
-#  if [[ -f "${log}" ]]; then # rotate logs
-#    while [[ ${num} -gt 1 ]]; do
-#      #shellcheck disable=SC2086
-#      let prev=${num}-1
-#      if [[ -f "${log}.${prev}" ]]; then
-#        mv "${log}.${prev}" "${log}.${num}"
-#      fi
-#      num=${prev}
-#    done
-#    mv "${log}" "${log}.${num}"
-#    gzip -9 "${log}.${num}"
-#  fi
-#}
-#
-#
-# Another example:  finding java
-#
-# By default, Hadoop assumes that $JAVA_HOME is always defined
-# outside of its configuration. Eons ago, Apple standardized
-# on a helper program called java_home to find it for you.
-#
-#function hadoop_java_setup
-#{
-#
-#  if [[ -z "${JAVA_HOME}" ]]; then
-#     case $HADOOP_OS_TYPE in
-#       Darwin*)
-#          JAVA_HOME=$(/usr/libexec/java_home)
-#          ;;
-#     esac
-#  fi
-#
-#  # Bail if we did not detect it
-#  if [[ -z "${JAVA_HOME}" ]]; then
-#    echo "ERROR: JAVA_HOME is not set and could not be found." 1>&2
-#    exit 1
-#  fi
-#
-#  if [[ ! -d "${JAVA_HOME}" ]]; then
-#     echo "ERROR: JAVA_HOME (${JAVA_HOME}) does not exist." 1>&2
-#     exit 1
-#  fi
-#
-#  JAVA="${JAVA_HOME}/bin/java"
-#
-#  if [[ ! -x ${JAVA} ]]; then
-#    echo "ERROR: ${JAVA} is not executable." 1>&2
-#    exit 1
-#  fi
-#  JAVA_HEAP_MAX=-Xmx1g
-#  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-128}
-#
-#  # check envvars which might override default args
-#  if [[ -n "$HADOOP_HEAPSIZE" ]]; then
-#    JAVA_HEAP_MAX="-Xmx${HADOOP_HEAPSIZE}m"
-#  fi
-#}
-
-

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml

@@ -214,4 +214,13 @@
     A special value of "*" means all users are allowed.</description>
     A special value of "*" means all users are allowed.</description>
   </property>
   </property>
 
 
+  <property>
+    <name>security.applicationhistory.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationHistoryProtocol, used by the timeline
+    server and the generic history service client to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
 </configuration>
 </configuration>
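
The ACL format described above (comma-separated users, a blank, then comma-separated groups, with "*" meaning everyone) is the format parsed by Hadoop's AccessControlList class. Below is a minimal sketch of checking such a value, assuming only the public AccessControlList and UserGroupInformation APIs; the alice/users values are made up for illustration:

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AccessControlList;

    public class AclFormatDemo {
      public static void main(String[] args) throws Exception {
        // "alice,bob users,wheel": users alice and bob, plus groups users and wheel
        AccessControlList acl = new AccessControlList("alice,bob users,wheel");

        // a value of "*" would make isAllAllowed() return true instead
        System.out.println("all allowed: " + acl.isAllAllowed());

        // a test identity that belongs to the "users" group
        UserGroupInformation ugi =
            UserGroupInformation.createUserForTesting("alice", new String[] {"users"});
        System.out.println("alice allowed: " + acl.isUserAllowed(ugi));
      }
    }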

+ 94 - 0
hadoop-common-project/hadoop-common/src/main/conf/hadoop-user-functions.sh.example

@@ -0,0 +1,94 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#######
+# Advanced Users Only
+######
+
+# You can do things like replace parts of the shell underbelly.
+# Most of this code is in hadoop-functions.sh.
+#
+#
+# For example, if you want to add compression to the rotation
+# method for the .out files that daemons generate, you can do
+# that by redefining the hadoop_rotate_log function by
+# uncommenting this code block:
+
+#function hadoop_rotate_log
+#{
+#  local log=$1;
+#  local num=${2:-5};
+#
+#  if [[ -f "${log}" ]]; then
+#    while [[ ${num} -gt 1 ]]; do
+#      #shellcheck disable=SC2086
+#      let prev=${num}-1
+#      if [[ -f "${log}.${prev}.gz" ]]; then
+#        mv "${log}.${prev}.gz" "${log}.${num}.gz"
+#      fi
+#      num=${prev}
+#    done
+#    mv "${log}" "${log}.${num}"
+#    gzip -9 "${log}.${num}"
+#  fi
+#}
+#
+#
+
+#
+# Another example:  finding java
+#
+# By default, Hadoop assumes that $JAVA_HOME is always defined
+# outside of its configuration. Eons ago, Apple standardized
+# on a helper program called java_home to find it for you.
+#
+#function hadoop_java_setup
+#{
+#
+#  if [[ -z "${JAVA_HOME}" ]]; then
+#     case $HADOOP_OS_TYPE in
+#       Darwin*)
+#          JAVA_HOME=$(/usr/libexec/java_home)
+#          ;;
+#     esac
+#  fi
+#
+#  # Bail if we did not detect it
+#  if [[ -z "${JAVA_HOME}" ]]; then
+#    echo "ERROR: JAVA_HOME is not set and could not be found." 1>&2
+#    exit 1
+#  fi
+#
+#  if [[ ! -d "${JAVA_HOME}" ]]; then
+#     echo "ERROR: JAVA_HOME (${JAVA_HOME}) does not exist." 1>&2
+#     exit 1
+#  fi
+#
+#  JAVA="${JAVA_HOME}/bin/java"
+#
+#  if [[ ! -x ${JAVA} ]]; then
+#    echo "ERROR: ${JAVA} is not executable." 1>&2
+#    exit 1
+#  fi
+#  JAVA_HEAP_MAX=-Xmx1g
+#  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-128}
+#
+#  # check envvars which might override default args
+#  if [[ -n "$HADOOP_HEAPSIZE" ]]; then
+#    JAVA_HEAP_MAX="-Xmx${HADOOP_HEAPSIZE}m"
+#  fi
+#}

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java

@@ -143,8 +143,10 @@ public abstract class HAAdmin extends Configured implements Tool {
     }
     }
     /*  returns true if other target node is active or some exception occurred 
     /*  returns true if other target node is active or some exception occurred 
         and forceActive was not set  */
         and forceActive was not set  */
-    if(isOtherTargetNodeActive(argv[0], cmd.hasOption(FORCEACTIVE))) {
-      return -1;
+    if(!cmd.hasOption(FORCEACTIVE)) {
+      if(isOtherTargetNodeActive(argv[0], cmd.hasOption(FORCEACTIVE))) {
+        return -1;
+      }
     }
     }
     HAServiceTarget target = resolveTarget(argv[0]);
     HAServiceTarget target = resolveTarget(argv[0]);
     if (!checkManualStateManagementOK(target)) {
     if (!checkManualStateManagementOK(target)) {

+ 30 - 2
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.c

@@ -37,6 +37,8 @@
 #include <sys/types.h>
 #include <sys/types.h>
 #include <unistd.h>
 #include <unistd.h>
 
 
+#define ZERO_FULLY_BUF_SIZE 8192
+
 static pthread_mutex_t g_rand_lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_mutex_t g_rand_lock = PTHREAD_MUTEX_INITIALIZER;
 
 
 JNIEXPORT void JNICALL
 JNIEXPORT void JNICALL
@@ -83,6 +85,24 @@ done:
   }
   }
 }
 }
 
 
+static int zero_fully(int fd, jint length)
+{
+  char buf[ZERO_FULLY_BUF_SIZE];
+  int res;
+
+  memset(buf, 0, sizeof(buf));
+  while (length > 0) {
+    res = write(fd, buf,
+      (length > ZERO_FULLY_BUF_SIZE) ? ZERO_FULLY_BUF_SIZE : length);
+    if (res < 0) {
+      if (errno == EINTR) continue;
+      return errno;
+    }
+    length -= res;
+  }
+  return 0;
+}
+
 JNIEXPORT jobject JNICALL
 JNIEXPORT jobject JNICALL
 Java_org_apache_hadoop_io_nativeio_SharedFileDescriptorFactory_createDescriptor0(
 Java_org_apache_hadoop_io_nativeio_SharedFileDescriptorFactory_createDescriptor0(
   JNIEnv *env, jclass clazz, jstring jprefix, jstring jpath, jint length)
   JNIEnv *env, jclass clazz, jstring jprefix, jstring jpath, jint length)
@@ -136,12 +156,20 @@ Java_org_apache_hadoop_io_nativeio_SharedFileDescriptorFactory_createDescriptor0
     (*env)->Throw(env, jthr);
     (*env)->Throw(env, jthr);
     goto done;
     goto done;
   }
   }
-  if (ftruncate(fd, length) < 0) {
-    jthr = newIOException(env, "ftruncate(%s, %d) failed: error %d (%s)",
+  ret = zero_fully(fd, length);
+  if (ret) {
+    jthr = newIOException(env, "zero_fully(%s, %d) failed: error %d (%s)",
                           path, length, ret, terror(ret));
                           path, length, ret, terror(ret));
     (*env)->Throw(env, jthr);
     (*env)->Throw(env, jthr);
     goto done;
     goto done;
   }
   }
+  if (lseek(fd, 0, SEEK_SET) < 0) {
+    ret = errno;
+    jthr = newIOException(env, "lseek(%s, 0, SEEK_SET) failed: error %d (%s)",
+                          path, ret, terror(ret));
+    (*env)->Throw(env, jthr);
+    goto done;
+  }
   jret = fd_create(env, fd); // throws exception on error.
   jret = fd_create(env, fd); // throws exception on error.
 
 
 done:
 done:

+ 53 - 0
hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm

@@ -85,6 +85,59 @@ User Commands
    {{{../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/HadoopArchives.html}
    {{{../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/HadoopArchives.html}
    Hadoop Archives Guide}}.
    Hadoop Archives Guide}}.
 
 
+* <<<credential>>>
+
+   Command to manage credentials, passwords and secrets within credential providers.
+
+   The CredentialProvider API in Hadoop allows for the separation of applications
+   and how they store their required passwords/secrets. In order to indicate
+   a particular provider type and location, the user must provide the
+   <hadoop.security.credential.provider.path> configuration element in core-site.xml
+   or use the command line option <<<-provider>>> on each of the following commands.
+   This provider path is a comma-separated list of URLs that indicates the type and
+   location of a list of providers that should be consulted.
+   For example, the following path:
+
+   <<<user:///,jceks://file/tmp/test.jceks,jceks://hdfs@nn1.example.com/my/path/test.jceks>>>
+
+   indicates that the current user's credentials file should be consulted through
+   the User Provider, that the local file located at <<</tmp/test.jceks>>> is a Java Keystore
+   Provider and that the file located within HDFS at <<<nn1.example.com/my/path/test.jceks>>>
+   is also a store for a Java Keystore Provider.
+
+   The credential command is most often used to provision a password or secret
+   into a particular credential store provider. To explicitly indicate which
+   provider store to use, specify the <<<-provider>>> option. Otherwise, given a
+   path of multiple providers, the first non-transient provider will be used,
+   which may or may not be the one you intended.
+
+   Example: <<<-provider jceks://file/tmp/test.jceks>>>
+
+   Usage: <<<hadoop credential <subcommand> [options]>>>
+
+*-------------------+-------------------------------------------------------+
+||COMMAND_OPTION    ||                   Description
+*-------------------+-------------------------------------------------------+
+| create <alias> [-v <value>][-provider <provider-path>]| Prompts the user for
+                    | a credential to be stored as the given alias when a value
+                    | is not provided via <<<-v>>>. The
+                    | <hadoop.security.credential.provider.path> within the
+                    | core-site.xml file will be used unless a <<<-provider>>> is
+                    | indicated.
+*-------------------+-------------------------------------------------------+
+| delete <alias> [-i][-provider <provider-path>] | Deletes the credential with
+                    | the provided alias and optionally warns the user when
+                    | <<<--interactive>>> is used.
+                    | The <hadoop.security.credential.provider.path> within the
+                    | core-site.xml file will be used unless a <<<-provider>>> is
+                    | indicated.
+*-------------------+-------------------------------------------------------+
+| list [-provider <provider-path>] | Lists all of the credential aliases.
+                    | The <hadoop.security.credential.provider.path> within the
+                    | core-site.xml file will be used unless a <<<-provider>>> is
+                    | indicated.
+*-------------------+-------------------------------------------------------+
+
 * <<<distcp>>>
 * <<<distcp>>>
 
 
    Copy file or directories recursively. More information can be found at
    Copy file or directories recursively. More information can be found at

+ 24 - 1
hadoop-common-project/hadoop-kms/pom.xml

@@ -238,7 +238,7 @@
         <executions>
         <executions>
           <execution>
           <execution>
             <id>default-war</id>
             <id>default-war</id>
-            <phase>package</phase>
+            <phase>prepare-package</phase>
             <goals>
             <goals>
               <goal>war</goal>
               <goal>war</goal>
             </goals>
             </goals>
@@ -251,6 +251,29 @@
           </execution>
           </execution>
         </executions>
         </executions>
       </plugin>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>prepare-jar</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>jar</goal>
+            </goals>
+            <configuration>
+              <classifier>classes</classifier>
+            </configuration>
+          </execution>
+          <execution>
+            <id>prepare-test-jar</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
       <plugin>
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>findbugs-maven-plugin</artifactId>
         <artifactId>findbugs-maven-plugin</artifactId>

+ 39 - 8
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java

@@ -18,7 +18,9 @@
 package org.apache.hadoop.crypto.key.kms.server;
 package org.apache.hadoop.crypto.key.kms.server;
 
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Preconditions;
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.mortbay.jetty.Connector;
 import org.mortbay.jetty.Connector;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.Server;
@@ -26,7 +28,10 @@ import org.mortbay.jetty.security.SslSocketConnector;
 import org.mortbay.jetty.webapp.WebAppContext;
 import org.mortbay.jetty.webapp.WebAppContext;
 
 
 import java.io.File;
 import java.io.File;
+import java.io.FileOutputStream;
 import java.io.FileWriter;
 import java.io.FileWriter;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.io.Writer;
 import java.io.Writer;
 import java.net.InetAddress;
 import java.net.InetAddress;
 import java.net.MalformedURLException;
 import java.net.MalformedURLException;
@@ -34,6 +39,7 @@ import java.net.ServerSocket;
 import java.net.URI;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URL;
+import java.util.UUID;
 
 
 public class MiniKMS {
 public class MiniKMS {
 
 
@@ -140,13 +146,15 @@ public class MiniKMS {
   }
   }
 
 
   public void start() throws Exception {
   public void start() throws Exception {
+    ClassLoader cl = Thread.currentThread().getContextClassLoader();
     System.setProperty(KMSConfiguration.KMS_CONFIG_DIR, kmsConfDir);
     System.setProperty(KMSConfiguration.KMS_CONFIG_DIR, kmsConfDir);
     File aclsFile = new File(kmsConfDir, "kms-acls.xml");
     File aclsFile = new File(kmsConfDir, "kms-acls.xml");
     if (!aclsFile.exists()) {
     if (!aclsFile.exists()) {
-      Configuration acls = new Configuration(false);
-      Writer writer = new FileWriter(aclsFile);
-      acls.writeXml(writer);
-      writer.close();
+      InputStream is = cl.getResourceAsStream("mini-kms-acls-default.xml");
+      OutputStream os = new FileOutputStream(aclsFile);
+      IOUtils.copy(is, os);
+      is.close();
+      os.close();
     }
     }
     File coreFile = new File(kmsConfDir, "core-site.xml");
     File coreFile = new File(kmsConfDir, "core-site.xml");
     if (!coreFile.exists()) {
     if (!coreFile.exists()) {
@@ -161,19 +169,42 @@ public class MiniKMS {
       kms.set("hadoop.security.key.provider.path",
       kms.set("hadoop.security.key.provider.path",
           "jceks://file@" + new Path(kmsConfDir, "kms.keystore").toUri());
           "jceks://file@" + new Path(kmsConfDir, "kms.keystore").toUri());
       kms.set("hadoop.kms.authentication.type", "simple");
       kms.set("hadoop.kms.authentication.type", "simple");
+      kms.setBoolean(KMSConfiguration.KEY_AUTHORIZATION_ENABLE, false);
       Writer writer = new FileWriter(kmsFile);
       Writer writer = new FileWriter(kmsFile);
       kms.writeXml(writer);
       kms.writeXml(writer);
       writer.close();
       writer.close();
     }
     }
     System.setProperty("log4j.configuration", log4jConfFile);
     System.setProperty("log4j.configuration", log4jConfFile);
     jetty = createJettyServer(keyStore, keyStorePassword);
     jetty = createJettyServer(keyStore, keyStorePassword);
-    ClassLoader cl = Thread.currentThread().getContextClassLoader();
-    URL url = cl.getResource("kms-webapp");
-    if (url == null) {
+
+    // we need special handling for MiniKMS to work both when the webapp is in
+    // a directory and when it is inside a JAR on the classpath, because of the
+    // way Jetty handles webapps in a DIR, WAR or JAR.
+    URL webXmlUrl = cl.getResource("kms-webapp/WEB-INF/web.xml");
+    if (webXmlUrl == null) {
       throw new RuntimeException(
       throw new RuntimeException(
           "Could not find kms-webapp/ dir in test classpath");
           "Could not find kms-webapp/ dir in test classpath");
     }
     }
-    WebAppContext context = new WebAppContext(url.getPath(), "/kms");
+    boolean webXmlInJar = webXmlUrl.getPath().contains(".jar!/");
+    String webappPath;
+    if (webXmlInJar) {
+      File webInf = new File("target/" + UUID.randomUUID().toString() +
+          "/kms-webapp/WEB-INF");
+      webInf.mkdirs();
+      new File(webInf, "web.xml").delete();
+      InputStream is = cl.getResourceAsStream("kms-webapp/WEB-INF/web.xml");
+      OutputStream os = new FileOutputStream(new File(webInf, "web.xml"));
+      IOUtils.copy(is, os);
+      is.close();
+      os.close();
+      webappPath = webInf.getParentFile().getAbsolutePath();
+    } else {
+      webappPath = cl.getResource("kms-webapp").getPath();
+    }
+    WebAppContext context = new WebAppContext(webappPath, "/kms");
+    if (webXmlInJar) {
+      context.setClassLoader(cl);
+    }
     jetty.addHandler(context);
     jetty.addHandler(context);
     jetty.start();
     jetty.start();
     kmsURL = new URL(getJettyURL(jetty), "kms");
     kmsURL = new URL(getJettyURL(jetty), "kms");
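
The hunk above hinges on detecting whether a classpath resource is served from a JAR (its URL path contains ".jar!/") and copying it out when it is. The following is a self-contained, JDK-only sketch of that detection and copy step, separate from MiniKMS itself; the resource name mirrors the one probed above:

    import java.io.InputStream;
    import java.net.URL;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardCopyOption;

    public class ClasspathResourceDemo {
      public static void main(String[] args) throws Exception {
        ClassLoader cl = Thread.currentThread().getContextClassLoader();
        URL url = cl.getResource("kms-webapp/WEB-INF/web.xml");
        if (url == null) {
          throw new IllegalStateException("resource not found on the classpath");
        }
        // same heuristic as the patch: a jar-served resource has ".jar!/" in its path
        boolean inJar = url.getPath().contains(".jar!/");
        System.out.println("served from a jar: " + inJar);
        if (inJar) {
          // copy it to a plain file so code that needs a real directory can use it
          Path target = Paths.get("target", "extracted-web.xml");
          Files.createDirectories(target.getParent());
          try (InputStream is = cl.getResourceAsStream("kms-webapp/WEB-INF/web.xml")) {
            Files.copy(is, target, StandardCopyOption.REPLACE_EXISTING);
          }
          System.out.println("copied to " + target.toAbsolutePath());
        }
      }
    }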

+ 135 - 0
hadoop-common-project/hadoop-kms/src/test/resources/mini-kms-acls-default.xml

@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+  <!-- This file is hot-reloaded when it changes -->
+
+  <!-- KMS ACLs -->
+
+  <property>
+    <name>hadoop.kms.acl.CREATE</name>
+    <value>*</value>
+    <description>
+      ACL for create-key operations.
+      If the user is not in the GET ACL, the key material is not returned
+      as part of the response.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.DELETE</name>
+    <value>*</value>
+    <description>
+      ACL for delete-key operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.ROLLOVER</name>
+    <value>*</value>
+    <description>
+      ACL for rollover-key operations.
+      If the user is not in the GET ACL, the key material is not returned
+      as part of the response.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET</name>
+    <value>*</value>
+    <description>
+      ACL for get-key-version and get-current-key operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET_KEYS</name>
+    <value>*</value>
+    <description>
+      ACL for get-keys operation.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET_METADATA</name>
+    <value>*</value>
+    <description>
+      ACL for get-key-metadata and get-keys-metadata operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.SET_KEY_MATERIAL</name>
+    <value>*</value>
+    <description>
+      Complementary ACL for CREATE and ROLLOVER operations to allow the client
+      to provide the key material when creating or rolling a key.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GENERATE_EEK</name>
+    <value>*</value>
+    <description>
+      ACL for generateEncryptedKey CryptoExtension operations
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.DECRYPT_EEK</name>
+    <value>*</value>
+    <description>
+      ACL for decrypt EncryptedKey CryptoExtension operations
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.MANAGEMENT</name>
+    <value>*</value>
+    <description>
+      default ACL for MANAGEMENT operations for all key acls that are not
+      explicitly defined.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.GENERATE_EEK</name>
+    <value>*</value>
+    <description>
+      default ACL for GENERATE_EEK operations for all key acls that are not
+      explicitly defined.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.DECRYPT_EEK</name>
+    <value>*</value>
+    <description>
+      default ACL for DECRYPT_EEK operations for all key acls that are not
+      explicitly defined.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.READ</name>
+    <value>*</value>
+    <description>
+      default ACL for READ operations for all key acls that are not
+      explicitly defined.
+    </description>
+  </property>
+
+
+</configuration>

+ 7 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java

@@ -66,6 +66,8 @@ import org.mortbay.jetty.Server;
 import org.mortbay.jetty.webapp.WebAppContext;
 import org.mortbay.jetty.webapp.WebAppContext;
 
 
 import com.google.common.collect.Maps;
 import com.google.common.collect.Maps;
+import java.util.Properties;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;
 import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;
 
 
 public class TestHttpFSServer extends HFSTestCase {
 public class TestHttpFSServer extends HFSTestCase {
@@ -685,7 +687,11 @@ public class TestHttpFSServer extends HFSTestCase {
       new AuthenticationToken("u", "p",
       new AuthenticationToken("u", "p",
           new KerberosDelegationTokenAuthenticationHandler().getType());
           new KerberosDelegationTokenAuthenticationHandler().getType());
     token.setExpires(System.currentTimeMillis() + 100000000);
     token.setExpires(System.currentTimeMillis() + 100000000);
-    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
+    StringSignerSecretProvider secretProvider = new StringSignerSecretProvider();
+    Properties secretProviderProps = new Properties();
+    secretProviderProps.setProperty(AuthenticationFilter.SIGNATURE_SECRET, "secret");
+    secretProvider.init(secretProviderProps, null, -1);
+    Signer signer = new Signer(secretProvider);
     String tokenSigned = signer.sign(token.toString());
     String tokenSigned = signer.sign(token.toString());
 
 
     url = new URL(TestJettyHelper.getJettyURL(),
     url = new URL(TestJettyHelper.getJettyURL(),
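
The new initialization pattern shown above (a no-arg StringSignerSecretProvider configured through Properties) can be exercised on its own. Below is a small sketch that signs and verifies a string with hadoop-auth's Signer, assuming verifyAndExtract() as the matching check in the same class; the payload string is made up:

    import java.util.Properties;
    import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
    import org.apache.hadoop.security.authentication.util.Signer;
    import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;

    public class SignerDemo {
      public static void main(String[] args) throws Exception {
        // the provider is now configured through Properties instead of a constructor arg
        StringSignerSecretProvider secretProvider = new StringSignerSecretProvider();
        Properties props = new Properties();
        props.setProperty(AuthenticationFilter.SIGNATURE_SECRET, "secret");
        secretProvider.init(props, null, -1);

        Signer signer = new Signer(secretProvider);
        String signed = signer.sign("u=alice&expires=12345");  // payload is made up
        System.out.println("signed: " + signed);

        // verifyAndExtract() returns the original payload or throws SignerException
        System.out.println("verified: " + signer.verifyAndExtract(signed));
      }
    }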

+ 20 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -481,6 +481,16 @@ Release 2.6.0 - UNRELEASED
     HDFS-7061. Add test to verify encryption zone creation after NameNode
     HDFS-7061. Add test to verify encryption zone creation after NameNode
     restart without saving namespace. (Stephen Chu via wang)
     restart without saving namespace. (Stephen Chu via wang)
 
 
+    HDFS-7059. HAadmin transtionToActive with forceActive option can show
+    confusing message.
+
+    HDFS-6880. Adding tracing to DataNode data transfer protocol. (iwasakims
+    via cmccabe)
+
+    HDFS-7006. Test encryption zones with KMS. (Anthony Young-Garner and tucu)
+
+    HDFS-6851. Refactor EncryptionZoneWithId and EncryptionZone. (clamb via wang)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -670,6 +680,16 @@ Release 2.6.0 - UNRELEASED
     HDFS-7045. Fix NameNode deadlock when opening file under /.reserved path.
     HDFS-7045. Fix NameNode deadlock when opening file under /.reserved path.
     (Yi Liu via wang)
     (Yi Liu via wang)
 
 
+    HDFS-7032. Add WebHDFS support for reading and writing to encryption zones.
+    (clamb via wang)
+
+    HDFS-6965. NN continues to issue block locations for DNs with full disks.
+    (Rushabh Shah via kihwal)
+
+    HDFS-6789. TestDFSClientFailover.testFileContextDoesntDnsResolveLogicalURI
+    and TestDFSClientFailover.testDoesntDnsResolveLogicalURI failing on jdk7.
+    (Akira Ajisaka via wang)
+
     BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
     BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
   
       HDFS-6387. HDFS CLI admin tool for creating & deleting an
       HDFS-6387. HDFS CLI admin tool for creating & deleting an

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -185,6 +185,19 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.htrace</groupId>
       <groupId>org.htrace</groupId>
       <artifactId>htrace-core</artifactId>
       <artifactId>htrace-core</artifactId>
     </dependency>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-kms</artifactId>
+      <classifier>classes</classifier>
+      <type>jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-kms</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
   </dependencies>
 
 
   <build>
   <build>

+ 5 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh

@@ -20,8 +20,11 @@
 
 
 function hadoop_subproject_init
 function hadoop_subproject_init
 {
 {
-  if [[ -e "${HADOOP_CONF_DIR}/hdfs-env.sh" ]]; then
-    . "${HADOOP_CONF_DIR}/hdfs-env.sh"
+  if [[ -z "${HADOOP_HDFS_ENV_PROCESSED}" ]]; then
+    if [[ -e "${HADOOP_CONF_DIR}/hdfs-env.sh" ]]; then
+      . "${HADOOP_CONF_DIR}/hdfs-env.sh"
+      export HADOOP_HDFS_ENV_PROCESSED=true
+    fi
   fi
   fi
   
   
   # at some point in time, someone thought it would be a good idea to
   # at some point in time, someone thought it would be a good idea to

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -154,7 +154,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
 import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
-import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -2906,8 +2905,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
           throws IOException {
           throws IOException {
     checkOpen();
     checkOpen();
     try {
     try {
-      final EncryptionZoneWithId ezi = namenode.getEZForPath(src);
-      return (ezi.getId() < 0) ? null : ezi;
+      final EncryptionZone ez = namenode.getEZForPath(src);
+      return (ez.getId() < 0) ? null : ez;
     } catch (RemoteException re) {
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
       throw re.unwrapRemoteException(AccessControlException.class,
                                      UnresolvedPathException.class);
                                      UnresolvedPathException.class);
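
From an application's point of view, this client change surfaces through the public HdfsAdmin API. A hedged usage sketch follows, assuming HdfsAdmin.getEncryptionZoneForPath() and listEncryptionZones() from the encryption-zone API; the NameNode URI and paths are placeholders:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.EncryptionZone;

    public class EncryptionZoneLookupDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        HdfsAdmin admin = new HdfsAdmin(new URI("hdfs://nn1.example.com:8020"), conf);

        // returns null when the path is not inside an encryption zone,
        // matching the (ez.getId() < 0) check in the client code above
        EncryptionZone ez = admin.getEncryptionZoneForPath(new Path("/secure/data"));
        System.out.println(ez == null ? "not in an encryption zone" : ez.toString());

        // batched listing of all zones, driven by EncryptionZoneIterator underneath
        RemoteIterator<EncryptionZone> it = admin.listEncryptionZones();
        while (it.hasNext()) {
          EncryptionZone zone = it.next();
          System.out.println(zone.getPath() + " -> " + zone.getKeyName());
        }
      }
    }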

+ 36 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -88,6 +88,10 @@ import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
 
 
+import org.htrace.Span;
+import org.htrace.Trace;
+import org.htrace.TraceScope;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.CacheLoader;
@@ -355,12 +359,22 @@ public class DFSOutputStream extends FSOutputSummer
     /** Append on an existing block? */
     /** Append on an existing block? */
     private final boolean isAppend;
     private final boolean isAppend;
 
 
+    private final Span traceSpan;
+
     /**
     /**
      * Default construction for file create
      * Default construction for file create
      */
      */
     private DataStreamer() {
     private DataStreamer() {
+      this(null);
+    }
+
+    /**
+     * construction with tracing info
+     */
+    private DataStreamer(Span span) {
       isAppend = false;
       isAppend = false;
       stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
       stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
+      traceSpan = span;
     }
     }
     
     
     /**
     /**
@@ -371,9 +385,10 @@ public class DFSOutputStream extends FSOutputSummer
      * @throws IOException if error occurs
      * @throws IOException if error occurs
      */
      */
     private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
     private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
-        int bytesPerChecksum) throws IOException {
+        int bytesPerChecksum, Span span) throws IOException {
       isAppend = true;
       isAppend = true;
       stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
       stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
+      traceSpan = span;
       block = lastBlock.getBlock();
       block = lastBlock.getBlock();
       bytesSent = block.getNumBytes();
       bytesSent = block.getNumBytes();
       accessToken = lastBlock.getBlockToken();
       accessToken = lastBlock.getBlockToken();
@@ -463,6 +478,10 @@ public class DFSOutputStream extends FSOutputSummer
     @Override
     @Override
     public void run() {
     public void run() {
       long lastPacket = Time.now();
       long lastPacket = Time.now();
+      TraceScope traceScope = null;
+      if (traceSpan != null) {
+        traceScope = Trace.continueSpan(traceSpan);
+      }
       while (!streamerClosed && dfsClient.clientRunning) {
       while (!streamerClosed && dfsClient.clientRunning) {
 
 
         // if the Responder encountered an error, shutdown Responder
         // if the Responder encountered an error, shutdown Responder
@@ -636,6 +655,9 @@ public class DFSOutputStream extends FSOutputSummer
           }
           }
         }
         }
       }
       }
+      if (traceScope != null) {
+        traceScope.close();
+      }
       closeInternal();
       closeInternal();
     }
     }
 
 
@@ -1611,7 +1633,11 @@ public class DFSOutputStream extends FSOutputSummer
     computePacketChunkSize(dfsClient.getConf().writePacketSize,
     computePacketChunkSize(dfsClient.getConf().writePacketSize,
         checksum.getBytesPerChecksum());
         checksum.getBytesPerChecksum());
 
 
-    streamer = new DataStreamer();
+    Span traceSpan = null;
+    if (Trace.isTracing()) {
+      traceSpan = Trace.startSpan(this.getClass().getSimpleName()).detach();
+    }
+    streamer = new DataStreamer(traceSpan);
     if (favoredNodes != null && favoredNodes.length != 0) {
     if (favoredNodes != null && favoredNodes.length != 0) {
       streamer.setFavoredNodes(favoredNodes);
       streamer.setFavoredNodes(favoredNodes);
     }
     }
@@ -1652,15 +1678,21 @@ public class DFSOutputStream extends FSOutputSummer
     this(dfsClient, src, progress, stat, checksum);
     this(dfsClient, src, progress, stat, checksum);
     initialFileSize = stat.getLen(); // length of file when opened
     initialFileSize = stat.getLen(); // length of file when opened
 
 
+    Span traceSpan = null;
+    if (Trace.isTracing()) {
+      traceSpan = Trace.startSpan(this.getClass().getSimpleName()).detach();
+    }
+
     // The last partial block of the file has to be filled.
     // The last partial block of the file has to be filled.
     if (lastBlock != null) {
     if (lastBlock != null) {
       // indicate that we are appending to an existing block
       // indicate that we are appending to an existing block
       bytesCurBlock = lastBlock.getBlockSize();
       bytesCurBlock = lastBlock.getBlockSize();
-      streamer = new DataStreamer(lastBlock, stat, checksum.getBytesPerChecksum());
+      streamer = new DataStreamer(lastBlock, stat,
+          checksum.getBytesPerChecksum(), traceSpan);
     } else {
     } else {
       computePacketChunkSize(dfsClient.getConf().writePacketSize,
       computePacketChunkSize(dfsClient.getConf().writePacketSize,
           checksum.getBytesPerChecksum());
           checksum.getBytesPerChecksum());
-      streamer = new DataStreamer();
+      streamer = new DataStreamer(traceSpan);
     }
     }
     this.fileEncryptionInfo = stat.getFileEncryptionInfo();
     this.fileEncryptionInfo = stat.getFileEncryptionInfo();
   }
   }
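
The tracing change above hands a detached span from the thread that constructs the stream to the DataStreamer thread that does the writing. Here is a minimal sketch of that detach/continue hand-off, using only the htrace calls that appear in the hunk; whether a span is actually started depends on the configured sampler:

    import org.htrace.Span;
    import org.htrace.Trace;
    import org.htrace.TraceScope;

    public class SpanHandoffDemo {
      public static void main(String[] args) throws Exception {
        // detach a span on the calling thread, as the DFSOutputStream constructor does
        Span detachedSpan = null;
        if (Trace.isTracing()) {   // true only when a sampler selected this request
          detachedSpan = Trace.startSpan("demo-write").detach();
        }
        final Span span = detachedSpan;

        // re-attach it on the worker thread, as DataStreamer.run() does
        Thread worker = new Thread(new Runnable() {
          @Override
          public void run() {
            TraceScope scope = null;
            if (span != null) {
              scope = Trace.continueSpan(span);
            }
            try {
              // ... the work that should be attributed to the span ...
            } finally {
              if (scope != null) {
                scope.close();
              }
            }
          }
        });
        worker.start();
        worker.join();
      }
    }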

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -1296,7 +1296,7 @@ public interface ClientProtocol {
    * Get the encryption zone for a path.
    * Get the encryption zone for a path.
    */
    */
   @Idempotent
   @Idempotent
-  public EncryptionZoneWithId getEZForPath(String src)
+  public EncryptionZone getEZForPath(String src)
     throws IOException;
     throws IOException;
 
 
   /**
   /**
@@ -1307,7 +1307,7 @@ public interface ClientProtocol {
    * @return Batch of encryption zones.
    * @return Batch of encryption zones.
    */
    */
   @Idempotent
   @Idempotent
-  public BatchedEntries<EncryptionZoneWithId> listEncryptionZones(
+  public BatchedEntries<EncryptionZone> listEncryptionZones(
       long prevId) throws IOException;
       long prevId) throws IOException;
 
 
   /**
   /**

+ 14 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java

@@ -24,7 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 
 
 /**
 /**
  * A simple class for representing an encryption zone. Presently an encryption
  * A simple class for representing an encryption zone. Presently an encryption
- * zone only has a path (the root of the encryption zone) and a key name.
+ * zone only has a path (the root of the encryption zone), a key name, and a
+ * unique id. The id is used to implement batched listing of encryption zones.
  */
  */
 @InterfaceAudience.Public
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 @InterfaceStability.Evolving
@@ -32,10 +33,12 @@ public class EncryptionZone {
 
 
   private final String path;
   private final String path;
   private final String keyName;
   private final String keyName;
+  private final long id;
 
 
-  public EncryptionZone(String path, String keyName) {
+  public EncryptionZone(String path, String keyName, long id) {
     this.path = path;
     this.path = path;
     this.keyName = keyName;
     this.keyName = keyName;
+    this.id = id;
   }
   }
 
 
   public String getPath() {
   public String getPath() {
@@ -46,10 +49,14 @@ public class EncryptionZone {
     return keyName;
     return keyName;
   }
   }
 
 
+  public long getId() {
+    return id;
+  }
+
   @Override
   @Override
   public int hashCode() {
   public int hashCode() {
     return new HashCodeBuilder(13, 31).
     return new HashCodeBuilder(13, 31).
-      append(path).append(keyName).
+      append(path).append(keyName).append(id).
       toHashCode();
       toHashCode();
   }
   }
 
 
@@ -69,11 +76,14 @@ public class EncryptionZone {
     return new EqualsBuilder().
     return new EqualsBuilder().
       append(path, rhs.path).
       append(path, rhs.path).
       append(keyName, rhs.keyName).
       append(keyName, rhs.keyName).
+      append(id, rhs.id).
       isEquals();
       isEquals();
   }
   }
 
 
   @Override
   @Override
   public String toString() {
   public String toString() {
-    return "EncryptionZone [path=" + path + ", keyName=" + keyName + "]";
+    return "EncryptionZone [path=" + path +
+        ", keyName=" + keyName +
+        ", id=" + id + "]";
   }
   }
 }
 }
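
With EncryptionZoneWithId folded into EncryptionZone, callers now construct zones with the id directly. A small sketch of the widened constructor and accessors; the path, key name, and id are made up:

    import org.apache.hadoop.hdfs.protocol.EncryptionZone;

    public class EncryptionZoneDemo {
      public static void main(String[] args) {
        // the id now travels with the zone itself; the values here are made up
        EncryptionZone zone = new EncryptionZone("/secure/data", "key1", 42L);

        System.out.println(zone.getPath());     // /secure/data
        System.out.println(zone.getKeyName());  // key1
        System.out.println(zone.getId());       // 42, the cursor for batched listing
        System.out.println(zone);               // EncryptionZone [path=..., keyName=..., id=...]
      }
    }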

+ 11 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java

@@ -22,7 +22,7 @@ import java.io.IOException;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.BatchedRemoteIterator;
 
 
 /**
 /**
  * EncryptionZoneIterator is a remote iterator that iterates over encryption
  * EncryptionZoneIterator is a remote iterator that iterates over encryption
@@ -30,22 +30,24 @@ import org.apache.hadoop.fs.RemoteIterator;
  */
  */
 @InterfaceAudience.Private
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 @InterfaceStability.Evolving
-public class EncryptionZoneIterator implements RemoteIterator<EncryptionZone> {
+public class EncryptionZoneIterator
+    extends BatchedRemoteIterator<Long, EncryptionZone> {
 
 
-  private final EncryptionZoneWithIdIterator iterator;
+  private final ClientProtocol namenode;
 
 
   public EncryptionZoneIterator(ClientProtocol namenode) {
   public EncryptionZoneIterator(ClientProtocol namenode) {
-    iterator = new EncryptionZoneWithIdIterator(namenode);
+    super(Long.valueOf(0));
+    this.namenode = namenode;
   }
   }
 
 
   @Override
   @Override
-  public boolean hasNext() throws IOException {
-    return iterator.hasNext();
+  public BatchedEntries<EncryptionZone> makeRequest(Long prevId)
+      throws IOException {
+    return namenode.listEncryptionZones(prevId);
   }
   }
 
 
   @Override
   @Override
-  public EncryptionZone next() throws IOException {
-    EncryptionZoneWithId ezwi = iterator.next();
-    return ezwi.toEncryptionZone();
+  public Long elementToPrevKey(EncryptionZone entry) {
+    return entry.getId();
   }
   }
 }
 }
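
EncryptionZoneIterator now delegates paging to BatchedRemoteIterator: makeRequest() fetches the next batch starting after a cursor, and elementToPrevKey() derives the cursor from the last element returned. The toy, self-contained subclass below iterates the numbers 1..10 four at a time to show the same contract, assuming the BatchedListEntries helper nested in BatchedRemoteIterator; it is not HDFS code:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.fs.BatchedRemoteIterator;

    public class BatchedIterationDemo {
      static class NumberIterator extends BatchedRemoteIterator<Long, Long> {
        NumberIterator() {
          super(Long.valueOf(0));        // start the cursor before the first element
        }

        @Override
        public BatchedEntries<Long> makeRequest(Long prevKey) {
          // fetch up to 4 elements greater than the cursor
          List<Long> batch = new ArrayList<Long>();
          for (long i = prevKey + 1; i <= 10 && batch.size() < 4; i++) {
            batch.add(i);
          }
          boolean hasMore = !batch.isEmpty() && batch.get(batch.size() - 1) < 10;
          return new BatchedListEntries<Long>(batch, hasMore);
        }

        @Override
        public Long elementToPrevKey(Long element) {
          return element;                // the element itself is the next cursor
        }
      }

      public static void main(String[] args) throws Exception {
        NumberIterator it = new NumberIterator();
        while (it.hasNext()) {           // RemoteIterator-style consumption
          System.out.println(it.next());
        }
      }
    }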

+ 0 - 81
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java

@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.commons.lang.builder.HashCodeBuilder;
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * Internal class similar to an {@link EncryptionZone} which also holds a
- * unique id. Used to implement batched listing of encryption zones.
- */
-@InterfaceAudience.Private
-public class EncryptionZoneWithId extends EncryptionZone {
-
-  final long id;
-
-  public EncryptionZoneWithId(String path, String keyName, long id) {
-    super(path, keyName);
-    this.id = id;
-  }
-
-  public long getId() {
-    return id;
-  }
-
-  EncryptionZone toEncryptionZone() {
-    return new EncryptionZone(getPath(), getKeyName());
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(17, 29)
-        .append(super.hashCode())
-        .append(id)
-        .toHashCode();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    if (!super.equals(o)) {
-      return false;
-    }
-
-    EncryptionZoneWithId that = (EncryptionZoneWithId) o;
-
-    if (id != that.id) {
-      return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public String toString() {
-    return "EncryptionZoneWithId [" +
-        "id=" + id +
-        ", " + super.toString() +
-        ']';
-  }
-}

+ 40 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java

@@ -25,12 +25,16 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DataChecksum;
-
+import org.htrace.Span;
+import org.htrace.Trace;
+import org.htrace.TraceInfo;
+import org.htrace.TraceScope;
 
 
 /**
 /**
  * Static utilities for dealing with the protocol buffers used by the
  * Static utilities for dealing with the protocol buffers used by the
@@ -78,9 +82,41 @@ public abstract class DataTransferProtoUtil {
 
 
   static BaseHeaderProto buildBaseHeader(ExtendedBlock blk,
   static BaseHeaderProto buildBaseHeader(ExtendedBlock blk,
       Token<BlockTokenIdentifier> blockToken) {
       Token<BlockTokenIdentifier> blockToken) {
-    return BaseHeaderProto.newBuilder()
+    BaseHeaderProto.Builder builder =  BaseHeaderProto.newBuilder()
       .setBlock(PBHelper.convert(blk))
       .setBlock(PBHelper.convert(blk))
-      .setToken(PBHelper.convert(blockToken))
-      .build();
+      .setToken(PBHelper.convert(blockToken));
+    if (Trace.isTracing()) {
+      Span s = Trace.currentSpan();
+      builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
+          .setTraceId(s.getTraceId())
+          .setParentId(s.getSpanId()));
+    }
+    return builder.build();
+  }
+
+  public static TraceInfo fromProto(DataTransferTraceInfoProto proto) {
+    if (proto == null) return null;
+    if (!proto.hasTraceId()) return null;
+    return new TraceInfo(proto.getTraceId(), proto.getParentId());
+  }
+
+  public static TraceScope continueTraceSpan(ClientOperationHeaderProto header,
+      String description) {
+    return continueTraceSpan(header.getBaseHeader(), description);
+  }
+
+  public static TraceScope continueTraceSpan(BaseHeaderProto header,
+      String description) {
+    return continueTraceSpan(header.getTraceInfo(), description);
+  }
+
+  public static TraceScope continueTraceSpan(DataTransferTraceInfoProto proto,
+      String description) {
+    TraceScope scope = null;
+    TraceInfo info = fromProto(proto);
+    if (info != null) {
+      scope = Trace.startSpan(description, info);
+    }
+    return scope;
   }
   }
 }
 }

+ 89 - 34
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.protocol.datatransfer;
 package org.apache.hadoop.hdfs.protocol.datatransfer;
 
 
 import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.fromProto;
 import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.fromProto;
+import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.continueTraceSpan;
 import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
 import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
 
 
 import java.io.DataInputStream;
 import java.io.DataInputStream;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmR
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
+import org.htrace.TraceScope;
 
 
 /** Receiver */
 /** Receiver */
 @InterfaceAudience.Private
 @InterfaceAudience.Private
@@ -108,7 +110,10 @@ public abstract class Receiver implements DataTransferProtocol {
   /** Receive OP_READ_BLOCK */
   /** Receive OP_READ_BLOCK */
   private void opReadBlock() throws IOException {
   private void opReadBlock() throws IOException {
     OpReadBlockProto proto = OpReadBlockProto.parseFrom(vintPrefixed(in));
     OpReadBlockProto proto = OpReadBlockProto.parseFrom(vintPrefixed(in));
-    readBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
+    TraceScope traceScope = continueTraceSpan(proto.getHeader(),
+        proto.getClass().getSimpleName());
+    try {
+      readBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
         PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
         PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
         proto.getHeader().getClientName(),
         proto.getHeader().getClientName(),
         proto.getOffset(),
         proto.getOffset(),
@@ -117,27 +122,36 @@ public abstract class Receiver implements DataTransferProtocol {
         (proto.hasCachingStrategy() ?
         (proto.hasCachingStrategy() ?
             getCachingStrategy(proto.getCachingStrategy()) :
             getCachingStrategy(proto.getCachingStrategy()) :
           CachingStrategy.newDefaultStrategy()));
           CachingStrategy.newDefaultStrategy()));
+    } finally {
+      if (traceScope != null) traceScope.close();
+    }
   }
   }
   
   
   /** Receive OP_WRITE_BLOCK */
   /** Receive OP_WRITE_BLOCK */
   private void opWriteBlock(DataInputStream in) throws IOException {
   private void opWriteBlock(DataInputStream in) throws IOException {
     final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
     final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
     final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
     final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
-    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
-        PBHelper.convertStorageType(proto.getStorageType()),
-        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
-        proto.getHeader().getClientName(),
-        targets,
-        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
-        PBHelper.convert(proto.getSource()),
-        fromProto(proto.getStage()),
-        proto.getPipelineSize(),
-        proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
-        proto.getLatestGenerationStamp(),
-        fromProto(proto.getRequestedChecksum()),
-        (proto.hasCachingStrategy() ?
-            getCachingStrategy(proto.getCachingStrategy()) :
-          CachingStrategy.newDefaultStrategy()));
+    TraceScope traceScope = continueTraceSpan(proto.getHeader(),
+        proto.getClass().getSimpleName());
+    try {
+      writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
+          PBHelper.convertStorageType(proto.getStorageType()),
+          PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
+          proto.getHeader().getClientName(),
+          targets,
+          PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
+          PBHelper.convert(proto.getSource()),
+          fromProto(proto.getStage()),
+          proto.getPipelineSize(),
+          proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
+          proto.getLatestGenerationStamp(),
+          fromProto(proto.getRequestedChecksum()),
+          (proto.hasCachingStrategy() ?
+              getCachingStrategy(proto.getCachingStrategy()) :
+            CachingStrategy.newDefaultStrategy()));
+     } finally {
+      if (traceScope != null) traceScope.close();
+     }
   }
   }
 
 
   /** Receive {@link Op#TRANSFER_BLOCK} */
   /** Receive {@link Op#TRANSFER_BLOCK} */
@@ -145,11 +159,17 @@ public abstract class Receiver implements DataTransferProtocol {
     final OpTransferBlockProto proto =
     final OpTransferBlockProto proto =
       OpTransferBlockProto.parseFrom(vintPrefixed(in));
       OpTransferBlockProto.parseFrom(vintPrefixed(in));
     final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
     final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
-    transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
-        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
-        proto.getHeader().getClientName(),
-        targets,
-        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
+    TraceScope traceScope = continueTraceSpan(proto.getHeader(),
+        proto.getClass().getSimpleName());
+    try {
+      transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
+          PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
+          proto.getHeader().getClientName(),
+          targets,
+          PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
+    } finally {
+      if (traceScope != null) traceScope.close();
+    }
   }
   }
 
 
   /** Receive {@link Op#REQUEST_SHORT_CIRCUIT_FDS} */
   /** Receive {@link Op#REQUEST_SHORT_CIRCUIT_FDS} */
@@ -158,9 +178,15 @@ public abstract class Receiver implements DataTransferProtocol {
       OpRequestShortCircuitAccessProto.parseFrom(vintPrefixed(in));
       OpRequestShortCircuitAccessProto.parseFrom(vintPrefixed(in));
     SlotId slotId = (proto.hasSlotId()) ? 
     SlotId slotId = (proto.hasSlotId()) ? 
         PBHelper.convert(proto.getSlotId()) : null;
         PBHelper.convert(proto.getSlotId()) : null;
-    requestShortCircuitFds(PBHelper.convert(proto.getHeader().getBlock()),
-        PBHelper.convert(proto.getHeader().getToken()),
-        slotId, proto.getMaxVersion());
+    TraceScope traceScope = continueTraceSpan(proto.getHeader(),
+        proto.getClass().getSimpleName());
+    try {
+      requestShortCircuitFds(PBHelper.convert(proto.getHeader().getBlock()),
+          PBHelper.convert(proto.getHeader().getToken()),
+          slotId, proto.getMaxVersion());
+    } finally {
+      if (traceScope != null) traceScope.close();
+    }
   }
   }
 
 
   /** Receive {@link Op#RELEASE_SHORT_CIRCUIT_FDS} */
   /** Receive {@link Op#RELEASE_SHORT_CIRCUIT_FDS} */
@@ -168,38 +194,67 @@ public abstract class Receiver implements DataTransferProtocol {
       throws IOException {
       throws IOException {
     final ReleaseShortCircuitAccessRequestProto proto =
     final ReleaseShortCircuitAccessRequestProto proto =
       ReleaseShortCircuitAccessRequestProto.parseFrom(vintPrefixed(in));
       ReleaseShortCircuitAccessRequestProto.parseFrom(vintPrefixed(in));
-    releaseShortCircuitFds(PBHelper.convert(proto.getSlotId()));
+    TraceScope traceScope = continueTraceSpan(proto.getTraceInfo(),
+        proto.getClass().getSimpleName());
+    try {
+      releaseShortCircuitFds(PBHelper.convert(proto.getSlotId()));
+    } finally {
+      if (traceScope != null) traceScope.close();
+    }
   }
   }
 
 
   /** Receive {@link Op#REQUEST_SHORT_CIRCUIT_SHM} */
   /** Receive {@link Op#REQUEST_SHORT_CIRCUIT_SHM} */
   private void opRequestShortCircuitShm(DataInputStream in) throws IOException {
   private void opRequestShortCircuitShm(DataInputStream in) throws IOException {
     final ShortCircuitShmRequestProto proto =
     final ShortCircuitShmRequestProto proto =
         ShortCircuitShmRequestProto.parseFrom(vintPrefixed(in));
         ShortCircuitShmRequestProto.parseFrom(vintPrefixed(in));
-    requestShortCircuitShm(proto.getClientName());
+    TraceScope traceScope = continueTraceSpan(proto.getTraceInfo(),
+        proto.getClass().getSimpleName());
+    try {
+      requestShortCircuitShm(proto.getClientName());
+    } finally {
+      if (traceScope != null) traceScope.close();
+    }
   }
   }
 
 
   /** Receive OP_REPLACE_BLOCK */
   /** Receive OP_REPLACE_BLOCK */
   private void opReplaceBlock(DataInputStream in) throws IOException {
   private void opReplaceBlock(DataInputStream in) throws IOException {
     OpReplaceBlockProto proto = OpReplaceBlockProto.parseFrom(vintPrefixed(in));
     OpReplaceBlockProto proto = OpReplaceBlockProto.parseFrom(vintPrefixed(in));
-    replaceBlock(PBHelper.convert(proto.getHeader().getBlock()),
-        PBHelper.convertStorageType(proto.getStorageType()),
-        PBHelper.convert(proto.getHeader().getToken()),
-        proto.getDelHint(),
-        PBHelper.convert(proto.getSource()));
+    TraceScope traceScope = continueTraceSpan(proto.getHeader(),
+        proto.getClass().getSimpleName());
+    try {
+      replaceBlock(PBHelper.convert(proto.getHeader().getBlock()),
+          PBHelper.convertStorageType(proto.getStorageType()),
+          PBHelper.convert(proto.getHeader().getToken()),
+          proto.getDelHint(),
+          PBHelper.convert(proto.getSource()));
+    } finally {
+      if (traceScope != null) traceScope.close();
+    }
   }
   }
 
 
   /** Receive OP_COPY_BLOCK */
   /** Receive OP_COPY_BLOCK */
   private void opCopyBlock(DataInputStream in) throws IOException {
   private void opCopyBlock(DataInputStream in) throws IOException {
     OpCopyBlockProto proto = OpCopyBlockProto.parseFrom(vintPrefixed(in));
     OpCopyBlockProto proto = OpCopyBlockProto.parseFrom(vintPrefixed(in));
-    copyBlock(PBHelper.convert(proto.getHeader().getBlock()),
-        PBHelper.convert(proto.getHeader().getToken()));
+    TraceScope traceScope = continueTraceSpan(proto.getHeader(),
+        proto.getClass().getSimpleName());
+    try {
+      copyBlock(PBHelper.convert(proto.getHeader().getBlock()),
+          PBHelper.convert(proto.getHeader().getToken()));
+    } finally {
+      if (traceScope != null) traceScope.close();
+    }
   }
   }
 
 
   /** Receive OP_BLOCK_CHECKSUM */
   /** Receive OP_BLOCK_CHECKSUM */
   private void opBlockChecksum(DataInputStream in) throws IOException {
   private void opBlockChecksum(DataInputStream in) throws IOException {
     OpBlockChecksumProto proto = OpBlockChecksumProto.parseFrom(vintPrefixed(in));
     OpBlockChecksumProto proto = OpBlockChecksumProto.parseFrom(vintPrefixed(in));
-    
+    TraceScope traceScope = continueTraceSpan(proto.getHeader(),
+        proto.getClass().getSimpleName());
+    try {
     blockChecksum(PBHelper.convert(proto.getHeader().getBlock()),
     blockChecksum(PBHelper.convert(proto.getHeader().getBlock()),
         PBHelper.convert(proto.getHeader().getToken()));
         PBHelper.convert(proto.getHeader().getToken()));
+    } finally {
+      if (traceScope != null) traceScope.close();
+    }
   }
   }
 }
 }
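
Every Receiver op above follows the same pattern: resume the trace span that the caller propagated in the request, run the original handler body unchanged inside try, and close the scope in finally so the span covers the server-side handling even when the handler throws. The continueTraceSpan() helper itself is not part of these hunks; a minimal sketch of its core, assuming org.htrace's Trace.startSpan(String, TraceInfo) overload and the TraceInfo(traceId, parentId) constructor, might look like the following (the overloads that accept BaseHeaderProto or ClientOperationHeaderProto would presumably just unwrap to the trace-info message first):

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto;
    import org.htrace.Trace;
    import org.htrace.TraceInfo;
    import org.htrace.TraceScope;

    // Resume the caller's span if the request carried trace info; otherwise
    // return null, so the null check in the finally blocks above is a no-op.
    private static TraceScope continueTraceSpan(
        DataTransferTraceInfoProto info, String description) {
      TraceScope scope = null;
      if (info != null && info.hasTraceId()) {
        scope = Trace.startSpan(description,
            new TraceInfo(info.getTraceId(), info.getParentId()));
      }
      return scope;
    }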

+ 20 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
@@ -47,6 +48,9 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DataChecksum;
 
 
+import org.htrace.Trace;
+import org.htrace.Span;
+
 import com.google.protobuf.Message;
 import com.google.protobuf.Message;
 
 
 /** Sender */
 /** Sender */
@@ -185,19 +189,29 @@ public class Sender implements DataTransferProtocol {
   
   
   @Override
   @Override
   public void releaseShortCircuitFds(SlotId slotId) throws IOException {
   public void releaseShortCircuitFds(SlotId slotId) throws IOException {
-    ReleaseShortCircuitAccessRequestProto proto = 
+    ReleaseShortCircuitAccessRequestProto.Builder builder =
         ReleaseShortCircuitAccessRequestProto.newBuilder().
         ReleaseShortCircuitAccessRequestProto.newBuilder().
-        setSlotId(PBHelper.convert(slotId)).
-        build();
+        setSlotId(PBHelper.convert(slotId));
+    if (Trace.isTracing()) {
+      Span s = Trace.currentSpan();
+      builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
+          .setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
+    }
+    ReleaseShortCircuitAccessRequestProto proto = builder.build();
     send(out, Op.RELEASE_SHORT_CIRCUIT_FDS, proto);
     send(out, Op.RELEASE_SHORT_CIRCUIT_FDS, proto);
   }
   }
 
 
   @Override
   @Override
   public void requestShortCircuitShm(String clientName) throws IOException {
   public void requestShortCircuitShm(String clientName) throws IOException {
-    ShortCircuitShmRequestProto proto =
+    ShortCircuitShmRequestProto.Builder builder =
         ShortCircuitShmRequestProto.newBuilder().
         ShortCircuitShmRequestProto.newBuilder().
-        setClientName(clientName).
-        build();
+        setClientName(clientName);
+    if (Trace.isTracing()) {
+      Span s = Trace.currentSpan();
+      builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
+          .setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
+    }
+    ShortCircuitShmRequestProto proto = builder.build();
     send(out, Op.REQUEST_SHORT_CIRCUIT_SHM, proto);
     send(out, Op.REQUEST_SHORT_CIRCUIT_SHM, proto);
   }
   }
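
The short-circuit requests carry no BaseHeaderProto, so the Sender attaches the trace info directly, and only when a span is actually active: the Trace.isTracing() guard leaves the optional traceInfo field unset for untraced calls, so they cost nothing extra on the wire. A hedged sketch of a traced caller (the variable names and span label are illustrative, not from this patch; assumes htrace's Sampler.ALWAYS constant):

    import org.htrace.Sampler;
    import org.htrace.Trace;
    import org.htrace.TraceScope;

    TraceScope scope = Trace.startSpan("releaseShortCircuitFds", Sampler.ALWAYS);
    try {
      // Trace.isTracing() is now true, so the request carries traceInfo.
      sender.releaseShortCircuitFds(slotId);
    } finally {
      scope.close();
    }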
   
   

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -1335,7 +1335,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     try {
     try {
       GetEZForPathResponseProto.Builder builder =
       GetEZForPathResponseProto.Builder builder =
           GetEZForPathResponseProto.newBuilder();
           GetEZForPathResponseProto.newBuilder();
-      final EncryptionZoneWithId ret = server.getEZForPath(req.getSrc());
+      final EncryptionZone ret = server.getEZForPath(req.getSrc());
       builder.setZone(PBHelper.convert(ret));
       builder.setZone(PBHelper.convert(ret));
       return builder.build();
       return builder.build();
     } catch (IOException e) {
     } catch (IOException e) {
@@ -1348,7 +1348,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     RpcController controller, ListEncryptionZonesRequestProto req)
     RpcController controller, ListEncryptionZonesRequestProto req)
     throws ServiceException {
     throws ServiceException {
     try {
     try {
-      BatchedEntries<EncryptionZoneWithId> entries = server
+      BatchedEntries<EncryptionZone> entries = server
           .listEncryptionZones(req.getId());
           .listEncryptionZones(req.getId());
       ListEncryptionZonesResponseProto.Builder builder =
       ListEncryptionZonesResponseProto.Builder builder =
           ListEncryptionZonesResponseProto.newBuilder();
           ListEncryptionZonesResponseProto.newBuilder();

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -55,7 +55,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -189,7 +189,7 @@ import com.google.protobuf.ServiceException;
 
 
 import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
 import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
-    .EncryptionZoneWithIdProto;
+    .EncryptionZoneProto;
 
 
 /**
 /**
  * This class forwards NN's ClientProtocol calls as RPC calls to the NN server
  * This class forwards NN's ClientProtocol calls as RPC calls to the NN server
@@ -1334,7 +1334,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
   }
 
 
   @Override
   @Override
-  public EncryptionZoneWithId getEZForPath(String src)
+  public EncryptionZone getEZForPath(String src)
       throws IOException {
       throws IOException {
     final GetEZForPathRequestProto.Builder builder =
     final GetEZForPathRequestProto.Builder builder =
         GetEZForPathRequestProto.newBuilder();
         GetEZForPathRequestProto.newBuilder();
@@ -1350,7 +1350,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
   }
 
 
   @Override
   @Override
-  public BatchedEntries<EncryptionZoneWithId> listEncryptionZones(long id)
+  public BatchedEntries<EncryptionZone> listEncryptionZones(long id)
       throws IOException {
       throws IOException {
     final ListEncryptionZonesRequestProto req =
     final ListEncryptionZonesRequestProto req =
       ListEncryptionZonesRequestProto.newBuilder()
       ListEncryptionZonesRequestProto.newBuilder()
@@ -1359,12 +1359,12 @@ public class ClientNamenodeProtocolTranslatorPB implements
     try {
     try {
       EncryptionZonesProtos.ListEncryptionZonesResponseProto response =
       EncryptionZonesProtos.ListEncryptionZonesResponseProto response =
           rpcProxy.listEncryptionZones(null, req);
           rpcProxy.listEncryptionZones(null, req);
-      List<EncryptionZoneWithId> elements =
+      List<EncryptionZone> elements =
           Lists.newArrayListWithCapacity(response.getZonesCount());
           Lists.newArrayListWithCapacity(response.getZonesCount());
-      for (EncryptionZoneWithIdProto p : response.getZonesList()) {
+      for (EncryptionZoneProto p : response.getZonesList()) {
         elements.add(PBHelper.convert(p));
         elements.add(PBHelper.convert(p));
       }
       }
-      return new BatchedListEntries<EncryptionZoneWithId>(elements,
+      return new BatchedListEntries<EncryptionZone>(elements,
           response.getHasMore());
           response.getHasMore());
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.protocolPB;
 
 
 import static com.google.common.base.Preconditions.checkNotNull;
 import static com.google.common.base.Preconditions.checkNotNull;
 import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
 import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
-    .EncryptionZoneWithIdProto;
+    .EncryptionZoneProto;
 
 
 import java.io.EOFException;
 import java.io.EOFException;
 import java.io.IOException;
 import java.io.IOException;
@@ -64,7 +64,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdfs.protocol.FsAclPermission;
 import org.apache.hadoop.hdfs.protocol.FsAclPermission;
@@ -2328,15 +2328,15 @@ public class PBHelper {
     return builder.build();
     return builder.build();
   }
   }
 
 
-  public static EncryptionZoneWithIdProto convert(EncryptionZoneWithId zone) {
-    return EncryptionZoneWithIdProto.newBuilder()
+  public static EncryptionZoneProto convert(EncryptionZone zone) {
+    return EncryptionZoneProto.newBuilder()
         .setId(zone.getId())
         .setId(zone.getId())
         .setKeyName(zone.getKeyName())
         .setKeyName(zone.getKeyName())
         .setPath(zone.getPath()).build();
         .setPath(zone.getPath()).build();
   }
   }
 
 
-  public static EncryptionZoneWithId convert(EncryptionZoneWithIdProto proto) {
-    return new EncryptionZoneWithId(proto.getPath(), proto.getKeyName(),
+  public static EncryptionZone convert(EncryptionZoneProto proto) {
+    return new EncryptionZone(proto.getPath(), proto.getKeyName(),
         proto.getId());
         proto.getId());
   }
   }
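
The two convert() overloads above are meant to be inverses, so a round trip through the wire format preserves the path, key name and id. An illustrative check, not taken from the patch, assuming the EncryptionZone getters mirror its (path, keyName, id) constructor:

    EncryptionZone zone = new EncryptionZone("/zone", "myKey", 42);
    EncryptionZoneProto proto = PBHelper.convert(zone);
    EncryptionZone back = PBHelper.convert(proto);
    // All three fields should survive the round trip.
    assert back.getPath().equals(zone.getPath());
    assert back.getKeyName().equals(zone.getKeyName());
    assert back.getId() == zone.getId();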
 
 

+ 8 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java

@@ -231,11 +231,13 @@ public class DatanodeWebHdfsMethods {
       DFSClient dfsclient = newDfsClient(nnId, conf);
       DFSClient dfsclient = newDfsClient(nnId, conf);
       FSDataOutputStream out = null;
       FSDataOutputStream out = null;
       try {
       try {
-        out = new FSDataOutputStream(dfsclient.create(
+        out = dfsclient.createWrappedOutputStream(dfsclient.create(
             fullpath, permission.getFsPermission(), 
             fullpath, permission.getFsPermission(), 
-            overwrite.getValue() ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
-                : EnumSet.of(CreateFlag.CREATE),
-            replication.getValue(conf), blockSize.getValue(conf), null, b, null), null);
+            overwrite.getValue() ?
+                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) :
+                EnumSet.of(CreateFlag.CREATE),
+            replication.getValue(conf), blockSize.getValue(conf), null,
+            b, null), null);
         IOUtils.copyBytes(in, out, b);
         IOUtils.copyBytes(in, out, b);
         out.close();
         out.close();
         out = null;
         out = null;
@@ -418,7 +420,8 @@ public class DatanodeWebHdfsMethods {
       final DFSClient dfsclient = newDfsClient(nnId, conf);
       final DFSClient dfsclient = newDfsClient(nnId, conf);
       HdfsDataInputStream in = null;
       HdfsDataInputStream in = null;
       try {
       try {
-        in = new HdfsDataInputStream(dfsclient.open(fullpath, b, true));
+        in = dfsclient.createWrappedInputStream(
+            dfsclient.open(fullpath, b, true));
         in.seek(offset.getValue());
         in.seek(offset.getValue());
       } catch(IOException ioe) {
       } catch(IOException ioe) {
         IOUtils.cleanup(LOG, in);
         IOUtils.cleanup(LOG, in);

+ 12 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java

@@ -31,7 +31,7 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
@@ -53,8 +53,8 @@ public class EncryptionZoneManager {
   public static Logger LOG = LoggerFactory.getLogger(EncryptionZoneManager
   public static Logger LOG = LoggerFactory.getLogger(EncryptionZoneManager
       .class);
       .class);
 
 
-  private static final EncryptionZoneWithId NULL_EZ =
-      new EncryptionZoneWithId("", "", -1);
+  private static final EncryptionZone NULL_EZ =
+      new EncryptionZone("", "", -1);
 
 
   /**
   /**
    * EncryptionZoneInt is the internal representation of an encryption zone. The
    * EncryptionZoneInt is the internal representation of an encryption zone. The
@@ -196,18 +196,18 @@ public class EncryptionZoneManager {
   }
   }
 
 
   /**
   /**
-   * Returns an EncryptionZoneWithId representing the ez for a given path.
-   * Returns an empty marker EncryptionZoneWithId if path is not in an ez.
+   * Returns an EncryptionZone representing the ez for a given path.
+   * Returns an empty marker EncryptionZone if path is not in an ez.
    *
    *
    * @param iip The INodesInPath of the path to check
    * @param iip The INodesInPath of the path to check
-   * @return the EncryptionZoneWithId representing the ez for the path.
+   * @return the EncryptionZone representing the ez for the path.
    */
    */
-  EncryptionZoneWithId getEZINodeForPath(INodesInPath iip) {
+  EncryptionZone getEZINodeForPath(INodesInPath iip) {
     final EncryptionZoneInt ezi = getEncryptionZoneForPath(iip);
     final EncryptionZoneInt ezi = getEncryptionZoneForPath(iip);
     if (ezi == null) {
     if (ezi == null) {
       return NULL_EZ;
       return NULL_EZ;
     } else {
     } else {
-      return new EncryptionZoneWithId(getFullPathName(ezi), ezi.getKeyName(),
+      return new EncryptionZone(getFullPathName(ezi), ezi.getKeyName(),
           ezi.getINodeId());
           ezi.getINodeId());
     }
     }
   }
   }
@@ -300,19 +300,19 @@ public class EncryptionZoneManager {
    * <p/>
    * <p/>
    * Called while holding the FSDirectory lock.
    * Called while holding the FSDirectory lock.
    */
    */
-  BatchedListEntries<EncryptionZoneWithId> listEncryptionZones(long prevId)
+  BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
       throws IOException {
       throws IOException {
     assert dir.hasReadLock();
     assert dir.hasReadLock();
     NavigableMap<Long, EncryptionZoneInt> tailMap = encryptionZones.tailMap
     NavigableMap<Long, EncryptionZoneInt> tailMap = encryptionZones.tailMap
         (prevId, false);
         (prevId, false);
     final int numResponses = Math.min(maxListEncryptionZonesResponses,
     final int numResponses = Math.min(maxListEncryptionZonesResponses,
         tailMap.size());
         tailMap.size());
-    final List<EncryptionZoneWithId> zones =
+    final List<EncryptionZone> zones =
         Lists.newArrayListWithExpectedSize(numResponses);
         Lists.newArrayListWithExpectedSize(numResponses);
 
 
     int count = 0;
     int count = 0;
     for (EncryptionZoneInt ezi : tailMap.values()) {
     for (EncryptionZoneInt ezi : tailMap.values()) {
-      zones.add(new EncryptionZoneWithId(getFullPathName(ezi),
+      zones.add(new EncryptionZone(getFullPathName(ezi),
           ezi.getKeyName(), ezi.getINodeId()));
           ezi.getKeyName(), ezi.getINodeId()));
       count++;
       count++;
       if (count >= numResponses) {
       if (count >= numResponses) {
@@ -320,6 +320,6 @@ public class EncryptionZoneManager {
       }
       }
     }
     }
     final boolean hasMore = (numResponses < tailMap.size());
     final boolean hasMore = (numResponses < tailMap.size());
-    return new BatchedListEntries<EncryptionZoneWithId>(zones, hasMore);
+    return new BatchedListEntries<EncryptionZone>(zones, hasMore);
   }
   }
 }
 }
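
listEncryptionZones() is cursor-based: the caller passes the largest inode id it has already seen, the server returns at most maxListEncryptionZonesResponses entries strictly after that id, and hasMore tells the caller whether to ask again. A hedged sketch of draining such a listing from the client side ("client" is a hypothetical handle with the same listEncryptionZones(long) signature, and a starting cursor of 0 is assumed):

    import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
    import org.apache.hadoop.hdfs.protocol.EncryptionZone;

    long prevId = 0;
    boolean hasMore = true;
    while (hasMore) {
      BatchedEntries<EncryptionZone> batch = client.listEncryptionZones(prevId);
      for (int i = 0; i < batch.size(); i++) {
        EncryptionZone zone = batch.get(i);
        System.out.println(zone.getPath() + " -> " + zone.getKeyName());
        prevId = zone.getId();  // resume after the last id we have seen
      }
      hasMore = batch.hasMore();
    }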

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -60,7 +60,7 @@ import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.FsAclPermission;
 import org.apache.hadoop.hdfs.protocol.FsAclPermission;
@@ -2730,7 +2730,7 @@ public class FSDirectory implements Closeable {
     }
     }
   }
   }
 
 
-  EncryptionZoneWithId getEZForPath(INodesInPath iip) {
+  EncryptionZone getEZForPath(INodesInPath iip) {
     readLock();
     readLock();
     try {
     try {
       return ezManager.getEZINodeForPath(iip);
       return ezManager.getEZINodeForPath(iip);
@@ -2739,7 +2739,7 @@ public class FSDirectory implements Closeable {
     }
     }
   }
   }
 
 
-  BatchedListEntries<EncryptionZoneWithId> listEncryptionZones(long prevId)
+  BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
       throws IOException {
       throws IOException {
     readLock();
     readLock();
     try {
     try {

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -178,7 +178,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -8677,7 +8677,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * @throws AccessControlException  if the caller is not the superuser.
    * @throws AccessControlException  if the caller is not the superuser.
    * @throws UnresolvedLinkException if the path can't be resolved.
    * @throws UnresolvedLinkException if the path can't be resolved.
    */
    */
-  EncryptionZoneWithId getEZForPath(final String srcArg)
+  EncryptionZone getEZForPath(final String srcArg)
     throws AccessControlException, UnresolvedLinkException, IOException {
     throws AccessControlException, UnresolvedLinkException, IOException {
     String src = srcArg;
     String src = srcArg;
     HdfsFileStatus resultingStat = null;
     HdfsFileStatus resultingStat = null;
@@ -8694,7 +8694,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       checkOperation(OperationCategory.READ);
       checkOperation(OperationCategory.READ);
       src = resolvePath(src, pathComponents);
       src = resolvePath(src, pathComponents);
       final INodesInPath iip = dir.getINodesInPath(src, true);
       final INodesInPath iip = dir.getINodesInPath(src, true);
-      final EncryptionZoneWithId ret = dir.getEZForPath(iip);
+      final EncryptionZone ret = dir.getEZForPath(iip);
       resultingStat = getAuditFileInfo(src, false);
       resultingStat = getAuditFileInfo(src, false);
       success = true;
       success = true;
       return ret;
       return ret;
@@ -8704,7 +8704,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
     }
   }
   }
 
 
-  BatchedListEntries<EncryptionZoneWithId> listEncryptionZones(long prevId)
+  BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
       throws IOException {
       throws IOException {
     boolean success = false;
     boolean success = false;
     checkSuperuserPrivilege();
     checkSuperuserPrivilege();
@@ -8713,7 +8713,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
     try {
       checkSuperuserPrivilege();
       checkSuperuserPrivilege();
       checkOperation(OperationCategory.READ);
       checkOperation(OperationCategory.READ);
-      final BatchedListEntries<EncryptionZoneWithId> ret =
+      final BatchedListEntries<EncryptionZone> ret =
           dir.listEncryptionZones(prevId);
           dir.listEncryptionZones(prevId);
       success = true;
       success = true;
       return ret;
       return ret;

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -81,7 +81,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -1443,13 +1443,13 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
   }
 
 
   @Override
   @Override
-  public EncryptionZoneWithId getEZForPath(String src)
+  public EncryptionZone getEZForPath(String src)
     throws IOException {
     throws IOException {
     return namesystem.getEZForPath(src);
     return namesystem.getEZForPath(src);
   }
   }
 
 
   @Override
   @Override
-  public BatchedEntries<EncryptionZoneWithId> listEncryptionZones(
+  public BatchedEntries<EncryptionZone> listEncryptionZones(
       long prevId) throws IOException {
       long prevId) throws IOException {
     return namesystem.listEncryptionZones(prevId);
     return namesystem.listEncryptionZones(prevId);
   }
   }

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto

@@ -47,6 +47,12 @@ message DataTransferEncryptorMessageProto {
 message BaseHeaderProto {
 message BaseHeaderProto {
   required ExtendedBlockProto block = 1;
   required ExtendedBlockProto block = 1;
   optional hadoop.common.TokenProto token = 2;
   optional hadoop.common.TokenProto token = 2;
+  optional DataTransferTraceInfoProto traceInfo = 3;
+}
+
+message DataTransferTraceInfoProto {
+  required uint64 traceId = 1;
+  required uint64 parentId = 2;
 }
 }
 
 
 message ClientOperationHeaderProto {
 message ClientOperationHeaderProto {
@@ -166,6 +172,7 @@ message OpRequestShortCircuitAccessProto {
 
 
 message ReleaseShortCircuitAccessRequestProto {
 message ReleaseShortCircuitAccessRequestProto {
   required ShortCircuitShmSlotProto slotId = 1;
   required ShortCircuitShmSlotProto slotId = 1;
+  optional DataTransferTraceInfoProto traceInfo = 2;
 }
 }
 
 
 message ReleaseShortCircuitAccessResponseProto {
 message ReleaseShortCircuitAccessResponseProto {
@@ -177,6 +184,7 @@ message ShortCircuitShmRequestProto {
   // The name of the client requesting the shared memory segment.  This is
   // The name of the client requesting the shared memory segment.  This is
   // purely for logging / debugging purposes.
   // purely for logging / debugging purposes.
   required string clientName = 1;
   required string clientName = 1;
+  optional DataTransferTraceInfoProto traceInfo = 2;
 }
 }
 
 
 message ShortCircuitShmResponseProto { 
 message ShortCircuitShmResponseProto { 
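
Because traceInfo is optional in all three messages, old clients that never set it and old servers that ignore it remain wire-compatible; a receiver should test for presence before resuming a span. With proto2's generated Java accessors that is simply (sketch):

    if (proto.hasTraceInfo()) {
      DataTransferTraceInfoProto t = proto.getTraceInfo();
      // resume a span from t.getTraceId() / t.getParentId(), as Receiver does
    }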

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto

@@ -45,14 +45,14 @@ message ListEncryptionZonesRequestProto {
   required int64 id = 1;
   required int64 id = 1;
 }
 }
 
 
-message EncryptionZoneWithIdProto {
+message EncryptionZoneProto {
   required string path = 1;
   required string path = 1;
   required string keyName = 2;
   required string keyName = 2;
   required int64 id = 3;
   required int64 id = 3;
 }
 }
 
 
 message ListEncryptionZonesResponseProto {
 message ListEncryptionZonesResponseProto {
-  repeated EncryptionZoneWithIdProto zones = 1;
+  repeated EncryptionZoneProto zones = 1;
   required bool hasMore = 2;
   required bool hasMore = 2;
 }
 }
 
 
@@ -61,5 +61,5 @@ message GetEZForPathRequestProto {
 }
 }
 
 
 message GetEZForPathResponseProto {
 message GetEZForPathResponseProto {
-    required EncryptionZoneWithIdProto zone = 1;
+    required EncryptionZoneProto zone = 1;
 }
 }

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java

@@ -257,9 +257,8 @@ public class TestDFSClientFailover {
    */
    */
   @Test
   @Test
   public void testDoesntDnsResolveLogicalURI() throws Exception {
   public void testDoesntDnsResolveLogicalURI() throws Exception {
-    NameService spyNS = spyOnNameService();
-    
     FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
     FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
+    NameService spyNS = spyOnNameService();
     String logicalHost = fs.getUri().getHost();
     String logicalHost = fs.getUri().getHost();
     Path qualifiedRoot = fs.makeQualified(new Path("/"));
     Path qualifiedRoot = fs.makeQualified(new Path("/"));
     
     
@@ -276,8 +275,8 @@ public class TestDFSClientFailover {
    */
    */
   @Test
   @Test
   public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
   public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
-    NameService spyNS = spyOnNameService();
     FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
     FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
+    NameService spyNS = spyOnNameService();
     String logicalHost = fs.getUri().getHost();
     String logicalHost = fs.getUri().getHost();
     Configuration haClientConf = fs.getConf();
     Configuration haClientConf = fs.getConf();
     
     

+ 58 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java

@@ -42,6 +42,7 @@ import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSTestWrapper;
 import org.apache.hadoop.fs.FSTestWrapper;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestWrapper;
 import org.apache.hadoop.fs.FileContextTestWrapper;
@@ -62,6 +63,8 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -109,6 +112,11 @@ public class TestEncryptionZones {
   protected FileSystemTestWrapper fsWrapper;
   protected FileSystemTestWrapper fsWrapper;
   protected FileContextTestWrapper fcWrapper;
   protected FileContextTestWrapper fcWrapper;
 
 
+  protected String getKeyProviderURI() {
+    return JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir +
+        "/test.jks";
+  }
+
   @Before
   @Before
   public void setup() throws Exception {
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
     conf = new HdfsConfiguration();
@@ -116,10 +124,7 @@ public class TestEncryptionZones {
     // Set up java key store
     // Set up java key store
     String testRoot = fsHelper.getTestRootDir();
     String testRoot = fsHelper.getTestRootDir();
     testRootDir = new File(testRoot).getAbsoluteFile();
     testRootDir = new File(testRoot).getAbsoluteFile();
-    final Path jksPath = new Path(testRootDir.toString(), "test.jks");
-    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
-        JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
-    );
+    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, getKeyProviderURI());
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
     // Lower the batch size for testing
     // Lower the batch size for testing
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
@@ -570,6 +575,55 @@ public class TestEncryptionZones {
     verifyFilesEqual(fs, encFile1, encFile2, len);
     verifyFilesEqual(fs, encFile1, encFile2, len);
   }
   }
 
 
+  @Test(timeout = 120000)
+  public void testReadWriteUsingWebHdfs() throws Exception {
+    final HdfsAdmin dfsAdmin =
+        new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    final FileSystem webHdfsFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+        WebHdfsFileSystem.SCHEME);
+
+    final Path zone = new Path("/zone");
+    fs.mkdirs(zone);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY);
+
+    /* Create an unencrypted file for comparison purposes. */
+    final Path unencFile = new Path("/unenc");
+    final int len = 8192;
+    DFSTestUtil.createFile(webHdfsFs, unencFile, len, (short) 1, 0xFEED);
+
+    /*
+     * Create the same file via webhdfs, but this time encrypted. Compare it
+     * using both webhdfs and DFS.
+     */
+    final Path encFile1 = new Path(zone, "myfile");
+    DFSTestUtil.createFile(webHdfsFs, encFile1, len, (short) 1, 0xFEED);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
+    verifyFilesEqual(fs, unencFile, encFile1, len);
+
+    /*
+     * Same thing except this time create the encrypted file using DFS.
+     */
+    final Path encFile2 = new Path(zone, "myfile2");
+    DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
+    verifyFilesEqual(fs, unencFile, encFile2, len);
+
+    /* Verify appending to files works correctly. */
+    appendOneByte(fs, unencFile);
+    appendOneByte(webHdfsFs, encFile1);
+    appendOneByte(fs, encFile2);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
+    verifyFilesEqual(fs, unencFile, encFile1, len);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
+    verifyFilesEqual(fs, unencFile, encFile2, len);
+  }
+
+  private void appendOneByte(FileSystem fs, Path p) throws IOException {
+    final FSDataOutputStream out = fs.append(p);
+    out.write((byte) 0x123);
+    out.close();
+  }
+
   @Test(timeout = 60000)
   @Test(timeout = 60000)
   public void testCipherSuiteNegotiation() throws Exception {
   public void testCipherSuiteNegotiation() throws Exception {
     final HdfsAdmin dfsAdmin =
     final HdfsAdmin dfsAdmin =

+ 56 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java

@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+
+import java.io.File;
+import java.util.UUID;
+
+public class TestEncryptionZonesWithKMS extends TestEncryptionZones {
+
+  private MiniKMS miniKMS;
+
+  @Override
+  protected String getKeyProviderURI() {
+    return KMSClientProvider.SCHEME_NAME + "://" +
+        miniKMS.getKMSUrl().toExternalForm().replace("://", "@");
+  }
+
+  @Before
+  public void setup() throws Exception {
+    File kmsDir = new File("target/test-classes/" +
+        UUID.randomUUID().toString());
+    Assert.assertTrue(kmsDir.mkdirs());
+    MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder();
+    miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build();
+    miniKMS.start();
+    super.setup();
+  }
+
+  @After
+  public void teardown() {
+    super.teardown();
+    miniKMS.stop();
+  }
+
+}
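
The KMS variant changes only the key provider URI, which folds the MiniKMS endpoint's transport scheme in after an '@'. A worked example with an illustrative port (the real MiniKMS port is chosen at runtime):

    String kmsUrl = "http://localhost:16000/kms";
    String providerUri = KMSClientProvider.SCHEME_NAME + "://"
        + kmsUrl.replace("://", "@");
    // providerUri is now "kms://http@localhost:16000/kms"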

+ 55 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

@@ -35,19 +35,26 @@ import java.util.List;
 import java.util.Map.Entry;
 import java.util.Map.Entry;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NetworkTopology;
 import org.junit.Assert;
 import org.junit.Assert;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.Mockito;
@@ -601,6 +608,53 @@ public class TestBlockManager {
         new BlockListAsLongs(null, null));
         new BlockListAsLongs(null, null));
     assertEquals(1, ds.getBlockReportCount());
     assertEquals(1, ds.getBlockReportCount());
   }
   }
+  
+  /**
+   * Tests that a namenode doesn't choose a datanode with full disks to 
+   * store blocks.
+   * @throws Exception
+   */
+  @Test
+  public void testStorageWithRemainingCapacity() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    FileSystem fs = FileSystem.get(conf);
+    Path file1 = null;
+    try {
+      cluster.waitActive();
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      final String poolId = namesystem.getBlockPoolId();
+      final DatanodeRegistration nodeReg =
+          DataNodeTestUtils.getDNRegistrationForBP(
+              cluster.getDataNodes().get(0), poolId);
+      final DatanodeDescriptor dd =
+          NameNodeAdapter.getDatanode(namesystem, nodeReg);
+      // By default, MiniDFSCluster creates 1 datanode with 2 storages.
+      // Leave only 64K of remaining capacity on each storage, then try to
+      // create a 100K file: no single storage has room for the 100K block.
+      for (DatanodeStorageInfo storage : dd.getStorageInfos()) {
+        storage.setUtilizationForTesting(65536, 0, 65536, 0);
+      }
+      // Sum of the remaining capacity of both storages.
+      dd.setRemaining(131072);
+      file1 = new Path("testRemainingStorage.dat");
+      try {
+        DFSTestUtil.createFile(fs, file1, 102400, 102400, 102400, (short) 1,
+            0x1BAD5EED);
+      } catch (RemoteException re) {
+        GenericTestUtils.assertExceptionContains("nodes instead of "
+            + "minReplication", re);
+      }
+    } finally {
+      // Clean up
+      assertTrue(fs.exists(file1));
+      fs.delete(file1, true);
+      assertTrue(!fs.exists(file1));
+      cluster.shutdown();
+    }
+  }
 
 
   @Test
   @Test
   public void testUseDelHint() {
   public void testUseDelHint() {
@@ -616,4 +670,4 @@ public class TestBlockManager {
     Assert.assertFalse(BlockManager.useDelHint(true, delHint, null,
     Assert.assertFalse(BlockManager.useDelHint(true, delHint, null,
         moreThan1Racks, excessTypes));
         moreThan1Racks, excessTypes));
   }
   }
-}
+}

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java

@@ -482,9 +482,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   }
   }
 
 
   @Override // FsDatasetSpi
   @Override // FsDatasetSpi
-  public synchronized void unfinalizeBlock(ExtendedBlock b) {
+  public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException{
     if (isValidRbw(b)) {
     if (isValidRbw(b)) {
-      blockMap.remove(b.getLocalBlock());
+      final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
+      map.remove(b.getLocalBlock());
     }
     }
   }
   }
 
 
@@ -624,7 +625,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
         continue;
         continue;
       }
       }
       storage.free(bpid, binfo.getNumBytes());
       storage.free(bpid, binfo.getNumBytes());
-      blockMap.remove(b);
+      map.remove(b);
     }
     }
     if (error) {
     if (error) {
       throw new IOException("Invalidate: Missing blocks.");
       throw new IOException("Invalidate: Missing blocks.");

+ 0 - 20
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java

@@ -232,26 +232,6 @@ public class TestDFSHAAdminMiniCluster {
     assertFalse("Both namenodes cannot be active", nn1.isActiveState() 
     assertFalse("Both namenodes cannot be active", nn1.isActiveState() 
         && nn2.isActiveState());
         && nn2.isActiveState());
    
    
-    /*  This test case doesn't allow nn2 to transition to Active even with
-        forceActive switch since nn1 is already active  */
-    if(nn1.getState() != null && !nn1.getState().
-        equals(HAServiceState.STANDBY.name()) ) {
-      cluster.transitionToStandby(0);
-    }
-    if(nn2.getState() != null && !nn2.getState().
-        equals(HAServiceState.STANDBY.name()) ) {
-      cluster.transitionToStandby(1);
-    }
-    //Making sure both the namenode are in standby state
-    assertTrue(nn1.isStandbyState());
-    assertTrue(nn2.isStandbyState());
-    
-    runTool("-transitionToActive", "nn1");
-    runTool("-transitionToActive", "nn2","--forceactive");
-    
-    assertFalse("Both namenodes cannot be active even though with forceActive",
-        nn1.isActiveState() && nn2.isActiveState());
-
     /*  In this test case, we have deliberately shut down nn1 and this will
     /*  In this test case, we have deliberately shut down nn1 and this will
         cause HAAAdmin#isOtherTargetNodeActive to throw an Exception 
         cause HAAAdmin#isOtherTargetNodeActive to throw an Exception 
         and transitionToActive for nn2 with  forceActive switch will succeed 
         and transitionToActive for nn2 with  forceActive switch will succeed 

+ 38 - 14
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java

@@ -24,6 +24,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.htrace.HTraceConfiguration;
 import org.htrace.HTraceConfiguration;
 import org.htrace.Sampler;
 import org.htrace.Sampler;
 import org.htrace.Span;
 import org.htrace.Span;
@@ -39,11 +40,13 @@ import org.junit.Test;
 import java.io.IOException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.List;
 import java.util.Map;
 import java.util.Map;
-import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeoutException;
+
+import com.google.common.base.Supplier;
 
 
 public class TestTracing {
 public class TestTracing {
 
 
@@ -81,7 +84,12 @@ public class TestTracing {
       "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.create",
       "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.create",
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
       "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.fsync",
       "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.fsync",
-      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.complete"
+      "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.complete",
+      "DFSOutputStream",
+      "OpWriteBlockProto",
+      "org.apache.hadoop.hdfs.protocol.ClientProtocol.addBlock",
+      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.addBlock"
     };
     };
     assertSpanNamesFound(expectedSpanNames);
     assertSpanNamesFound(expectedSpanNames);
 
 
@@ -96,7 +104,7 @@ public class TestTracing {
 
 
     // There should only be one trace id as it should all be homed in the
     // There should only be one trace id as it should all be homed in the
     // top trace.
     // top trace.
-    for (Span span : SetSpanReceiver.SetHolder.spans) {
+    for (Span span : SetSpanReceiver.SetHolder.spans.values()) {
       Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
       Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
     }
     }
   }
   }
@@ -152,7 +160,8 @@ public class TestTracing {
     String[] expectedSpanNames = {
     String[] expectedSpanNames = {
       "testReadTraceHooks",
       "testReadTraceHooks",
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
-      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.getBlockLocations"
+      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.getBlockLocations",
+      "OpReadBlockProto"
     };
     };
     assertSpanNamesFound(expectedSpanNames);
     assertSpanNamesFound(expectedSpanNames);
 
 
@@ -168,7 +177,7 @@ public class TestTracing {
 
 
     // There should only be one trace id as it should all be homed in the
     // There should only be one trace id as it should all be homed in the
     // top trace.
     // top trace.
-    for (Span span : SetSpanReceiver.SetHolder.spans) {
+    for (Span span : SetSpanReceiver.SetHolder.spans.values()) {
       Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
       Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
     }
     }
   }
   }
@@ -228,10 +237,24 @@ public class TestTracing {
     cluster.shutdown();
     cluster.shutdown();
   }
   }
 
 
-  private void assertSpanNamesFound(String[] expectedSpanNames) {
-    Map<String, List<Span>> map = SetSpanReceiver.SetHolder.getMap();
-    for (String spanName : expectedSpanNames) {
-      Assert.assertTrue("Should find a span with name " + spanName, map.get(spanName) != null);
+  static void assertSpanNamesFound(final String[] expectedSpanNames) {
+    try {
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          Map<String, List<Span>> map = SetSpanReceiver.SetHolder.getMap();
+          for (String spanName : expectedSpanNames) {
+            if (!map.containsKey(spanName)) {
+              return false;
+            }
+          }
+          return true;
+        }
+      }, 100, 1000);
+    } catch (TimeoutException e) {
+      Assert.fail("timed out to get expected spans: " + e.getMessage());
+    } catch (InterruptedException e) {
+      Assert.fail("interrupted while waiting spans: " + e.getMessage());
     }
     }
   }
   }
 
 
@@ -249,15 +272,16 @@ public class TestTracing {
     }
     }
 
 
     public void receiveSpan(Span span) {
     public void receiveSpan(Span span) {
-      SetHolder.spans.add(span);
+      SetHolder.spans.put(span.getSpanId(), span);
     }
     }
 
 
     public void close() {
     public void close() {
     }
     }
 
 
     public static class SetHolder {
     public static class SetHolder {
-      public static Set<Span> spans = new HashSet<Span>();
-
+      public static ConcurrentHashMap<Long, Span> spans =
+          new ConcurrentHashMap<Long, Span>();
+
       public static int size() {
       public static int size() {
         return spans.size();
         return spans.size();
       }
       }
@@ -265,7 +289,7 @@ public class TestTracing {
       public static Map<String, List<Span>> getMap() {
       public static Map<String, List<Span>> getMap() {
         Map<String, List<Span>> map = new HashMap<String, List<Span>>();
         Map<String, List<Span>> map = new HashMap<String, List<Span>>();
 
 
-        for (Span s : spans) {
+        for (Span s : spans.values()) {
           List<Span> l = map.get(s.getDescription());
           List<Span> l = map.get(s.getDescription());
           if (l == null) {
           if (l == null) {
             l = new LinkedList<Span>();
             l = new LinkedList<Span>();
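For reference, the polling idiom that the reworked assertSpanNamesFound above relies on can be exercised on its own. This is a minimal sketch and not part of the commit; it assumes the Hadoop test artifact (org.apache.hadoop.test.GenericTestUtils) and Guava's Supplier are on the classpath, the 100/1000 arguments mirror the check interval and timeout used above, and the condition is a hypothetical stand-in for SetHolder.getMap() containing the expected span names.

import com.google.common.base.Supplier;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForSketch {
  public static void main(String[] args) throws Exception {
    final long deadline = System.currentTimeMillis() + 500;
    try {
      // Re-evaluate the condition every 100 ms, give up after 1000 ms.
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          // Hypothetical condition; the test polls SetHolder.getMap() instead.
          return System.currentTimeMillis() > deadline;
        }
      }, 100, 1000);
      System.out.println("condition became true before the timeout");
    } catch (TimeoutException e) {
      // The test converts this case into Assert.fail(...).
      System.err.println("condition not met in time: " + e.getMessage());
    }
  }
}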

+ 97 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracingShortCircuitLocalRead.java

@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tracing;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.unix.DomainSocket;
+import org.apache.hadoop.net.unix.TemporarySocketDirectory;
+import org.htrace.Sampler;
+import org.htrace.Span;
+import org.htrace.Trace;
+import org.htrace.TraceScope;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import java.io.IOException;
+
+public class TestTracingShortCircuitLocalRead {
+  private static Configuration conf;
+  private static MiniDFSCluster cluster;
+  private static DistributedFileSystem dfs;
+  private static SpanReceiverHost spanReceiverHost;
+  private static TemporarySocketDirectory sockDir;
+  static final Path TEST_PATH = new Path("testShortCircuitTraceHooks");
+  static final int TEST_LENGTH = 1234;
+
+  @BeforeClass
+  public static void init() {
+    sockDir = new TemporarySocketDirectory();
+    DomainSocket.disableBindPathValidation();
+  }
+
+  @AfterClass
+  public static void shutdown() throws IOException {
+    sockDir.close();
+  }
+
+  @Test
+  public void testShortCircuitTraceHooks() throws IOException {
+    conf = new Configuration();
+    conf.set(SpanReceiverHost.SPAN_RECEIVERS_CONF_KEY,
+        TestTracing.SetSpanReceiver.class.getName());
+    conf.setLong("dfs.blocksize", 100 * 1024);
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
+    conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
+        "testShortCircuitTraceHooks._PORT");
+    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1)
+        .build();
+    dfs = cluster.getFileSystem();
+
+    try {
+      spanReceiverHost = SpanReceiverHost.getInstance(conf);
+      DFSTestUtil.createFile(dfs, TEST_PATH, TEST_LENGTH, (short)1, 5678L);
+
+      TraceScope ts = Trace.startSpan("testShortCircuitTraceHooks", Sampler.ALWAYS);
+      FSDataInputStream stream = dfs.open(TEST_PATH);
+      byte buf[] = new byte[TEST_LENGTH];
+      IOUtils.readFully(stream, buf, 0, TEST_LENGTH);
+      stream.close();
+      ts.close();
+
+      String[] expectedSpanNames = {
+        "OpRequestShortCircuitAccessProto",
+        "ShortCircuitShmRequestProto"
+      };
+      TestTracing.assertSpanNamesFound(expectedSpanNames);
+    } finally {
+      dfs.close();
+      cluster.shutdown();
+    }
+  }
+}

+ 5 - 2
hadoop-mapreduce-project/bin/mapred-config.sh

@@ -20,8 +20,11 @@
 
 function hadoop_subproject_init
 {
-  if [[ -e "${HADOOP_CONF_DIR}/mapred-env.sh" ]]; then
-    . "${HADOOP_CONF_DIR}/mapred-env.sh"
+  if [[ -z "${HADOOP_MAPRED_ENV_PROCESSED}" ]]; then
+    if [[ -e "${HADOOP_CONF_DIR}/mapred-env.sh" ]]; then
+      . "${HADOOP_CONF_DIR}/mapred-env.sh"
+      export HADOOP_MAPRED_ENV_PROCESSED=true
+    fi
  fi
  
  # at some point in time, someone thought it would be a good idea to

+ 25 - 0
hadoop-project/pom.xml

@@ -334,6 +334,20 @@
        <version>${project.version}</version>
      </dependency>
 
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-kms</artifactId>
+        <version>${project.version}</version>
+        <classifier>classes</classifier>
+        <type>jar</type>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-kms</artifactId>
+        <version>${project.version}</version>
+        <type>test-jar</type>
+      </dependency>
+
      <dependency>
        <groupId>com.google.guava</groupId>
        <artifactId>guava</artifactId>
@@ -849,6 +863,17 @@
       <artifactId>xercesImpl</artifactId>
       <version>2.9.1</version>
     </dependency>
+
+     <dependency>
+       <groupId>org.apache.curator</groupId>
+       <artifactId>curator-framework</artifactId>
+       <version>2.6.0</version>
+     </dependency>
+     <dependency>
+       <groupId>org.apache.curator</groupId>
+       <artifactId>curator-test</artifactId>
+       <version>2.6.0</version>
+     </dependency>
      
    </dependencies>
  </dependencyManagement>

+ 12 - 24
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java

@@ -21,46 +21,37 @@ package org.apache.hadoop.fs.s3a;
 
 public class Constants {
   // s3 access key
-  public static final String OLD_ACCESS_KEY = "fs.s3a.awsAccessKeyId";
-  public static final String NEW_ACCESS_KEY = "fs.s3a.access.key";
+  public static final String ACCESS_KEY = "fs.s3a.access.key";
 
   // s3 secret key
-  public static final String OLD_SECRET_KEY = "fs.s3a.awsSecretAccessKey";
-  public static final String NEW_SECRET_KEY = "fs.s3a.secret.key";
   
   // number of simultaneous connections to s3
-  public static final String OLD_MAXIMUM_CONNECTIONS = "fs.s3a.maxConnections";
-  public static final String NEW_MAXIMUM_CONNECTIONS = "fs.s3a.connection.maximum";
+  public static final String MAXIMUM_CONNECTIONS = "fs.s3a.connection.maximum";
   public static final int DEFAULT_MAXIMUM_CONNECTIONS = 15;
   
   // connect to s3 over ssl?
-  public static final String OLD_SECURE_CONNECTIONS = "fs.s3a.secureConnections";
-  public static final String NEW_SECURE_CONNECTIONS = "fs.s3a.connection.ssl.enabled";
+  public static final String SECURE_CONNECTIONS = "fs.s3a.connection.ssl.enabled";
   public static final boolean DEFAULT_SECURE_CONNECTIONS = true;
   
   // number of times we should retry errors
-  public static final String OLD_MAX_ERROR_RETRIES = "fs.s3a.maxErrorRetries";
-  public static final String NEW_MAX_ERROR_RETRIES = "fs.s3a.attempts.maximum";
+  public static final String MAX_ERROR_RETRIES = "fs.s3a.attempts.maximum";
   public static final int DEFAULT_MAX_ERROR_RETRIES = 10;
   
   // seconds until we give up on a connection to s3
-  public static final String OLD_SOCKET_TIMEOUT = "fs.s3a.socketTimeout";
-  public static final String NEW_SOCKET_TIMEOUT = "fs.s3a.connection.timeout";
+  public static final String SOCKET_TIMEOUT = "fs.s3a.connection.timeout";
   public static final int DEFAULT_SOCKET_TIMEOUT = 50000;
 
   // number of records to get while paging through a directory listing
-  public static final String OLD_MAX_PAGING_KEYS = "fs.s3a.maxPagingKeys";
-  public static final String NEW_MAX_PAGING_KEYS = "fs.s3a.paging.maximum";
+  public static final String MAX_PAGING_KEYS = "fs.s3a.paging.maximum";
   public static final int DEFAULT_MAX_PAGING_KEYS = 5000;
 
   // size of each of or multipart pieces in bytes
-  public static final String OLD_MULTIPART_SIZE = "fs.s3a.multipartSize";
-  public static final String NEW_MULTIPART_SIZE = "fs.s3a.multipart.size";
+  public static final String MULTIPART_SIZE = "fs.s3a.multipart.size";
   public static final long DEFAULT_MULTIPART_SIZE = 104857600; // 100 MB
   
   // minimum size in bytes before we start a multipart uploads or copy
-  public static final String OLD_MIN_MULTIPART_THRESHOLD = "fs.s3a.minMultipartSize";
-  public static final String NEW_MIN_MULTIPART_THRESHOLD = "fs.s3a.multipart.threshold";
+  public static final String MIN_MULTIPART_THRESHOLD = "fs.s3a.multipart.threshold";
   public static final int DEFAULT_MIN_MULTIPART_THRESHOLD = Integer.MAX_VALUE;
   
   // comma separated list of directories
@@ -68,18 +59,15 @@ public class Constants {
 
   // private | public-read | public-read-write | authenticated-read | 
   // log-delivery-write | bucket-owner-read | bucket-owner-full-control
-  public static final String OLD_CANNED_ACL = "fs.s3a.cannedACL";
-  public static final String NEW_CANNED_ACL = "fs.s3a.acl.default";
+  public static final String CANNED_ACL = "fs.s3a.acl.default";
   public static final String DEFAULT_CANNED_ACL = "";
 
   // should we try to purge old multipart uploads when starting up
-  public static final String OLD_PURGE_EXISTING_MULTIPART = "fs.s3a.purgeExistingMultiPart";
-  public static final String NEW_PURGE_EXISTING_MULTIPART = "fs.s3a.multipart.purge";
+  public static final String PURGE_EXISTING_MULTIPART = "fs.s3a.multipart.purge";
   public static final boolean DEFAULT_PURGE_EXISTING_MULTIPART = false;
 
   // purge any multipart uploads older than this number of seconds
-  public static final String OLD_PURGE_EXISTING_MULTIPART_AGE = "fs.s3a.purgeExistingMultiPartAge";
-  public static final String NEW_PURGE_EXISTING_MULTIPART_AGE = "fs.s3a.multipart.purge.age";
+  public static final String PURGE_EXISTING_MULTIPART_AGE = "fs.s3a.multipart.purge.age";
   public static final long DEFAULT_PURGE_EXISTING_MULTIPART_AGE = 14400;
 
   // s3 server-side encryption
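The hunk above drops the transitional OLD_*/NEW_* key pairs in favour of single fs.s3a.* property names. A minimal, hedged sketch (not part of the commit) of how caller code reads the renamed properties, using only the Hadoop Configuration API and the key names and defaults shown above:

import org.apache.hadoop.conf.Configuration;

public class S3AConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Unset properties fall back to the defaults defined in Constants above;
    // the old camelCase fs.s3a.* keys are simply no longer consulted.
    int maxConnections = conf.getInt("fs.s3a.connection.maximum", 15);
    boolean useSsl = conf.getBoolean("fs.s3a.connection.ssl.enabled", true);
    long multipartSize = conf.getLong("fs.s3a.multipart.size", 104857600L);
    System.out.println(maxConnections + " " + useSsl + " " + multipartSize);
  }
}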

+ 21 - 25
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java

@@ -95,8 +95,8 @@ public class S3AFileSystem extends FileSystem {
        this.getWorkingDirectory());
 
    // Try to get our credentials or just connect anonymously
-    String accessKey = conf.get(NEW_ACCESS_KEY, conf.get(OLD_ACCESS_KEY, null));
-    String secretKey = conf.get(NEW_SECRET_KEY, conf.get(OLD_SECRET_KEY, null));
+    String accessKey = conf.get(ACCESS_KEY, null);
+    String secretKey = conf.get(SECRET_KEY, null);
 
    String userInfo = name.getUserInfo();
    if (userInfo != null) {
@@ -118,37 +118,33 @@
    bucket = name.getHost();
 
    ClientConfiguration awsConf = new ClientConfiguration();
-    awsConf.setMaxConnections(conf.getInt(NEW_MAXIMUM_CONNECTIONS, 
-      conf.getInt(OLD_MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS)));
-    awsConf.setProtocol(conf.getBoolean(NEW_SECURE_CONNECTIONS, 
-      conf.getBoolean(OLD_SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS)) ? 
-        Protocol.HTTPS : Protocol.HTTP);
-    awsConf.setMaxErrorRetry(conf.getInt(NEW_MAX_ERROR_RETRIES, 
-      conf.getInt(OLD_MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES)));
-    awsConf.setSocketTimeout(conf.getInt(NEW_SOCKET_TIMEOUT, 
-      conf.getInt(OLD_SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT)));
+    awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS, 
+      DEFAULT_MAXIMUM_CONNECTIONS));
+    awsConf.setProtocol(conf.getBoolean(SECURE_CONNECTIONS, 
+      DEFAULT_SECURE_CONNECTIONS) ?  Protocol.HTTPS : Protocol.HTTP);
+    awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, 
+      DEFAULT_MAX_ERROR_RETRIES));
+    awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, 
+      DEFAULT_SOCKET_TIMEOUT));
 
    s3 = new AmazonS3Client(credentials, awsConf);
 
-    maxKeys = conf.getInt(NEW_MAX_PAGING_KEYS, 
-      conf.getInt(OLD_MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS));
-    partSize = conf.getLong(NEW_MULTIPART_SIZE, 
-      conf.getLong(OLD_MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE));
-    partSizeThreshold = conf.getInt(NEW_MIN_MULTIPART_THRESHOLD, 
-      conf.getInt(OLD_MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD));
+    maxKeys = conf.getInt(MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
+    partSize = conf.getLong(MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
+    partSizeThreshold = conf.getInt(MIN_MULTIPART_THRESHOLD, 
+      DEFAULT_MIN_MULTIPART_THRESHOLD);
 
    if (partSize < 5 * 1024 * 1024) {
-      LOG.error(NEW_MULTIPART_SIZE + " must be at least 5 MB");
+      LOG.error(MULTIPART_SIZE + " must be at least 5 MB");
      partSize = 5 * 1024 * 1024;
    }
 
    if (partSizeThreshold < 5 * 1024 * 1024) {
-      LOG.error(NEW_MIN_MULTIPART_THRESHOLD + " must be at least 5 MB");
+      LOG.error(MIN_MULTIPART_THRESHOLD + " must be at least 5 MB");
      partSizeThreshold = 5 * 1024 * 1024;
    }
 
-    String cannedACLName = conf.get(NEW_CANNED_ACL, 
-      conf.get(OLD_CANNED_ACL, DEFAULT_CANNED_ACL));
+    String cannedACLName = conf.get(CANNED_ACL, DEFAULT_CANNED_ACL);
    if (!cannedACLName.isEmpty()) {
      cannedACL = CannedAccessControlList.valueOf(cannedACLName);
    } else {
@@ -159,10 +155,10 @@ public class S3AFileSystem extends FileSystem {
      throw new IOException("Bucket " + bucket + " does not exist");
    }
 
-    boolean purgeExistingMultipart = conf.getBoolean(NEW_PURGE_EXISTING_MULTIPART, 
-      conf.getBoolean(OLD_PURGE_EXISTING_MULTIPART, DEFAULT_PURGE_EXISTING_MULTIPART));
-    long purgeExistingMultipartAge = conf.getLong(NEW_PURGE_EXISTING_MULTIPART_AGE, 
-      conf.getLong(OLD_PURGE_EXISTING_MULTIPART_AGE, DEFAULT_PURGE_EXISTING_MULTIPART_AGE));
+    boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART, 
+      DEFAULT_PURGE_EXISTING_MULTIPART);
+    long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE, 
+      DEFAULT_PURGE_EXISTING_MULTIPART_AGE);
 
    if (purgeExistingMultipart) {
      TransferManager transferManager = new TransferManager(s3);

+ 2 - 4
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOutputStream.java

@@ -75,10 +75,8 @@ public class S3AOutputStream extends OutputStream {
    this.statistics = statistics;
    this.serverSideEncryptionAlgorithm = serverSideEncryptionAlgorithm;
 
-    partSize = conf.getLong(NEW_MULTIPART_SIZE, 
-      conf.getLong(OLD_MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE));
-    partSizeThreshold = conf.getInt(NEW_MIN_MULTIPART_THRESHOLD, 
-      conf.getInt(OLD_MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD));
+    partSize = conf.getLong(MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
+    partSizeThreshold = conf.getInt(MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);
 
    if (conf.get(BUFFER_DIR, null) != null) {
      lDirAlloc = new LocalDirAllocator(BUFFER_DIR);

+ 15 - 0
hadoop-yarn-project/CHANGES.txt

@@ -8,6 +8,8 @@ Trunk - Unreleased
 
  IMPROVEMENTS
 
+    YARN-2438. yarn-env.sh cleanup (aw)
+
  OPTIMIZATIONS
 
  BUG FIXES
@@ -82,6 +84,10 @@ Release 2.6.0 - UNRELEASED
    failures should be ignored towards counting max-attempts. (Xuan Gong via
    vinodkv)
 
+    YARN-2531. Added a configuration for admins to be able to override app-configs
+    and enforce/not-enforce strict control of per-container cpu usage. (Varun
+    Vasudev via vinodkv)
+
  IMPROVEMENTS
 
    YARN-2197. Add a link to YARN CHANGES.txt in the left side of doc
@@ -223,6 +229,9 @@ Release 2.6.0 - UNRELEASED
    YARN-2547. Cross Origin Filter throws UnsupportedOperationException upon
    destroy (Mit Desai via jeagles)
 
+    YARN-2557. Add a parameter "attempt_Failures_Validity_Interval" into
+    DistributedShell (xgong)
+
  OPTIMIZATIONS
 
  BUG FIXES
@@ -359,6 +368,12 @@ Release 2.6.0 - UNRELEASED
    header and made it accept multiple origins in CrossOriginFilter. (Jonathan
    Eagles via zjshen)
 
+    YARN-2549. TestContainerLaunch fails due to classpath problem with hamcrest
+    classes. (cnauroth)
+
+    YARN-2529. Generic history service RPC interface doesn't work when service
+    authorization is enabled. (Zhijie Shen via jianhe)
+
 Release 2.5.1 - 2014-09-05
 
  INCOMPATIBLE CHANGES

+ 7 - 4
hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh

@@ -24,10 +24,13 @@ function hadoop_subproject_init
  # ...
  # this should get deprecated at some point.
  
-  if [[ -e "${YARN_CONF_DIR}/yarn-env.sh" ]]; then
-    . "${YARN_CONF_DIR}/yarn-env.sh"
-  elif [[ -e "${HADOOP_CONF_DIR}/yarn-env.sh" ]]; then
-    . "${HADOOP_CONF_DIR}/yarn-env.sh"
+  if [[ -z "${HADOOP_YARN_ENV_PROCESSED}" ]]; then
+    if [[ -e "${YARN_CONF_DIR}/yarn-env.sh" ]]; then
+      . "${YARN_CONF_DIR}/yarn-env.sh"
+    elif [[ -e "${HADOOP_CONF_DIR}/yarn-env.sh" ]]; then
+      . "${HADOOP_CONF_DIR}/yarn-env.sh"
+    fi
+    export HADOOP_YARN_ENV_PROCESSED=true
  fi
  
  if [[ -n "${YARN_CONF_DIR}" ]]; then

+ 35 - 17
hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh

@@ -1,3 +1,4 @@
+#
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
 # this work for additional information regarding copyright ownership.
@@ -25,27 +26,21 @@
 ##
 
 ###
-# Generic settings for YARN
+# YARN-specific overrides for generic settings
 ###
 
-# User for YARN daemons
-export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
-
-#
-# By default, YARN will use HADOOP_CONF_DIR. Specify a custom
-# YARN_CONF_DIR here
-# export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
-#
-
-# Override Hadoop's log directory & file
-# export YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
-# export YARN_LOGFILE='yarn.log'
+# By default, YARN will use HADOOP_LOG_DIR for YARN logging.  Specify a custom
+# log directory for YARN things here:
+# export YARN_LOG_DIR="${HADOOP_LOG_DIR}"
 
-# Need a custom-to-YARN service-level authorization policy file?
-# export YARN_POLICYFILE="yarn-policy.xml"
+# By default, YARN will use the value of HADOOP_LOGFILE as the 'fallback' log
+# file # when log4j settings are not defined.  Specify a custom YARN log file
+# here:
+# export YARN_LOGFILE=${HADOOP_LOGFILE}
 
-#Override the log4j settings for all YARN apps
-# export YARN_ROOT_LOGGER="INFO,console"
+#Override the log4j settings for all YARN apps By default, YARN will use
+# HADOOP_ROOT_LOGGER.
+# export YARN_ROOT_LOGGER=${HADOOP_ROOT_LOGGER}
 
 ###
 # Resource Manager specific parameters
@@ -125,3 +120,26 @@ export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
 #
 #export YARN_TIMELINESERVER_OPTS=
 
+###
+# Web App Proxy Server specifc parameters
+###
+
+# Specify the max Heapsize for the proxy server using a numerical value
+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS,
+# HADOOP_OPTS, and/or YARN_PROXYSERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+#
+#export YARN_PROXYSERVER_HEAPSIZE=1000
+
+# Specify the JVM options to be used when starting the proxy server.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#
+# See ResourceManager for some examples
+#
+#export YARN_PROXYSERVER_OPTS=
+
+

+ 14 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java

@@ -902,6 +902,16 @@ public class YarnConfiguration extends Configuration {
  public static final String NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH =
    NM_PREFIX + "linux-container-executor.cgroups.mount-path";
 
+  /**
+   * Whether the apps should run in strict resource usage mode(not allowed to
+   * use spare CPU)
+   */
+  public static final String NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE =
+      NM_PREFIX + "linux-container-executor.cgroups.strict-resource-usage";
+  public static final boolean DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE =
+      false;
+
+
 
  /**
   * Interval of time the linux container executor should try cleaning up
@@ -991,6 +1001,10 @@ public class YarnConfiguration extends Configuration {
  YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER =
      "security.resourcelocalizer.protocol.acl";
 
+  public static final String
+  YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONHISTORY_PROTOCOL =
+      "security.applicationhistory.protocol.acl";
+
  /** No. of milliseconds to wait between sending a SIGTERM and SIGKILL
   * to a running container */
  public static final String NM_SLEEP_DELAY_BEFORE_SIGKILL_MS =

+ 17 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java

@@ -75,7 +75,6 @@ import org.apache.hadoop.yarn.client.api.YarnClientApplication;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.apache.hadoop.yarn.util.Records;
 
 /**
  * Client for Distributed Shell application submission to YARN.
@@ -163,6 +162,8 @@ public class Client {
  // flag to indicate whether to keep containers across application attempts.
  private boolean keepContainers = false;
 
+  private long attemptFailuresValidityInterval = -1;
+
  // Debug flag
  boolean debugFlag = false;	
 
@@ -248,6 +249,12 @@ public class Client {
      " If the flag is true, running containers will not be killed when" +
      " application attempt fails and these containers will be retrieved by" +
      " the new application attempt ");
+    opts.addOption("attempt_failures_validity_interval", true,
+      "when attempt_failures_validity_interval in milliseconds is set to > 0," +
+      "the failure number will not take failures which happen out of " +
+      "the validityInterval into failure count. " +
+      "If failure count reaches to maxAppAttempts, " +
+      "the application will be failed.");
    opts.addOption("debug", false, "Dump out debug information");
    opts.addOption("help", false, "Print usage");
 
@@ -372,6 +379,10 @@
 
    clientTimeout = Integer.parseInt(cliParser.getOptionValue("timeout", "600000"));
 
+    attemptFailuresValidityInterval =
+        Long.parseLong(cliParser.getOptionValue(
+          "attempt_failures_validity_interval", "-1"));
+
    log4jPropFile = cliParser.getOptionValue("log_properties", "");
 
    return true;
@@ -456,6 +467,11 @@
    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);
 
+    if (attemptFailuresValidityInterval >= 0) {
+      appContext
+        .setAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
+    }
+
    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources			
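The new option ultimately just sets attemptFailuresValidityInterval on the ApplicationSubmissionContext, as the hunk above shows. A minimal sketch of that step in isolation, not part of the commit, assuming hadoop-yarn-api is on the classpath and the context has been obtained elsewhere (for example via YarnClient.createApplication()):

import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;

public class ValidityIntervalSketch {
  // Apply the parsed CLI value to the submission context; a negative value
  // (the default of -1) leaves the ResourceManager behaviour unchanged.
  static void applyInterval(ApplicationSubmissionContext appContext,
      long attemptFailuresValidityIntervalMs) {
    if (attemptFailuresValidityIntervalMs >= 0) {
      appContext.setAttemptFailuresValidityInterval(
          attemptFailuresValidityIntervalMs);
    }
  }
}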

+ 58 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSSleepingAppMaster.java

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.applications.distributedshell;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class TestDSSleepingAppMaster extends ApplicationMaster{
+
+  private static final Log LOG = LogFactory.getLog(TestDSSleepingAppMaster.class);
+  private static final long SLEEP_TIME = 5000;
+
+  public static void main(String[] args) {
+    boolean result = false;
+    try {
+      TestDSSleepingAppMaster appMaster = new TestDSSleepingAppMaster();
+      boolean doRun = appMaster.init(args);
+      if (!doRun) {
+        System.exit(0);
+      }
+      appMaster.run();
+      if (appMaster.appAttemptID.getAttemptId() <= 2) {
+        try {
+          // sleep some time
+          Thread.sleep(SLEEP_TIME);
+        } catch (InterruptedException e) {}
+        // fail the first am.
+        System.exit(100);
+      }
+      result = appMaster.finish();
+    } catch (Throwable t) {
+      System.exit(1);
+    }
+    if (result) {
+      LOG.info("Application Master completed successfully. exiting");
+      System.exit(0);
+    } else {
+      LOG.info("Application Master failed. exiting");
+      System.exit(2);
+    }
+  }
+}

+ 76 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java

@@ -308,6 +308,82 @@ public class TestDistributedShell {
      Assert.assertTrue(result);
    }
 
+  /*
+   * The sleeping period in TestDSSleepingAppMaster is set as 5 seconds.
+   * Set attempt_failures_validity_interval as 2.5 seconds. It will check
+   * how many attempt failures for previous 2.5 seconds.
+   * The application is expected to be successful.
+   */
+  @Test(timeout=90000)
+  public void testDSAttemptFailuresValidityIntervalSucess() throws Exception {
+    String[] args = {
+        "--jar",
+        APPMASTER_JAR,
+        "--num_containers",
+        "1",
+        "--shell_command",
+        "sleep 8",
+        "--master_memory",
+        "512",
+        "--container_memory",
+        "128",
+        "--attempt_failures_validity_interval",
+        "2500"
+      };
+
+      LOG.info("Initializing DS Client");
+      Configuration conf = yarnCluster.getConfig();
+      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+      Client client = new Client(TestDSSleepingAppMaster.class.getName(),
+        new Configuration(conf));
+
+      client.init(args);
+      LOG.info("Running DS Client");
+      boolean result = client.run();
+
+      LOG.info("Client run completed. Result=" + result);
+      // application should succeed
+      Assert.assertTrue(result);
+    }
+
+  /*
+   * The sleeping period in TestDSSleepingAppMaster is set as 5 seconds.
+   * Set attempt_failures_validity_interval as 15 seconds. It will check
+   * how many attempt failure for previous 15 seconds.
+   * The application is expected to be fail.
+   */
+  @Test(timeout=90000)
+  public void testDSAttemptFailuresValidityIntervalFailed() throws Exception {
+    String[] args = {
+        "--jar",
+        APPMASTER_JAR,
+        "--num_containers",
+        "1",
+        "--shell_command",
+        "sleep 8",
+        "--master_memory",
+        "512",
+        "--container_memory",
+        "128",
+        "--attempt_failures_validity_interval",
+        "15000"
+      };
+
+      LOG.info("Initializing DS Client");
+      Configuration conf = yarnCluster.getConfig();
+      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+      Client client = new Client(TestDSSleepingAppMaster.class.getName(),
+        new Configuration(conf));
+
+      client.init(args);
+      LOG.info("Running DS Client");
+      boolean result = client.run();
+
+      LOG.info("Client run completed. Result=" + result);
+      // application should be failed
+      Assert.assertFalse(result);
+    }
+
  @Test(timeout=90000)
  public void testDSShellWithCustomLogPropertyFile() throws Exception {
    final File basedir =

+ 10 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml

@@ -1039,6 +1039,16 @@
    <value>^[_.A-Za-z0-9][-@_.A-Za-z0-9]{0,255}?[$]?$</value>
  </property>
 
+  <property>
+    <description>This flag determines whether apps should run with strict resource limits
+    or be allowed to consume spare resources if they need them. For example, turning the
+    flag on will restrict apps to use only their share of CPU, even if the node has spare
+    CPU cycles. The default value is false i.e. use available resources. Please note that
+    turning this flag on may reduce job throughput on the cluster.</description>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
+    <value>false</value>
+  </property>
+
  <property>
    <description>T-file compression types used to compress aggregated logs.</description>
    <name>yarn.nodemanager.log-aggregation.compression-type</name>
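On the NodeManager side this property is read through the constants added to YarnConfiguration earlier in this commit. A minimal sketch, not part of the commit, assuming hadoop-yarn-api at a version that includes those constants:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class StrictCpuFlagSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Defaults to false: containers may borrow spare CPU cycles unless an
    // admin enables strict mode in yarn-site.xml.
    boolean strict = conf.getBoolean(
        YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE,
        YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE);
    System.out.println("strict cgroup CPU mode: " + strict);
  }
}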

+ 14 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java

@@ -26,7 +26,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol;
 import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
@@ -56,8 +58,8 @@ import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.server.timeline.security.authorize.TimelinePolicyProvider;
 
 
 public class ApplicationHistoryClientService extends AbstractService {
 public class ApplicationHistoryClientService extends AbstractService {
   private static final Log LOG = LogFactory
   private static final Log LOG = LogFactory
@@ -88,6 +90,12 @@ public class ApplicationHistoryClientService extends AbstractService {
             YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT,
             YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT,
             YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_THREAD_COUNT));
             YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_THREAD_COUNT));
 
 
+    // Enable service authorization?
+    if (conf.getBoolean(
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
+      refreshServiceAcls(conf, new TimelinePolicyProvider());
+    }
+
     server.start();
     server.start();
     this.bindAddress =
     this.bindAddress =
         conf.updateConnectAddr(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
         conf.updateConnectAddr(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
@@ -118,6 +126,11 @@ public class ApplicationHistoryClientService extends AbstractService {
     return this.bindAddress;
     return this.bindAddress;
   }
   }
 
 
+  private void refreshServiceAcls(Configuration configuration,
+      PolicyProvider policyProvider) {
+    this.server.refreshServiceAcl(configuration, policyProvider);
+  }
+
   private class ApplicationHSClientProtocolHandler implements
   private class ApplicationHSClientProtocolHandler implements
       ApplicationHistoryProtocol {
       ApplicationHistoryProtocol {
 
 

+ 14 - 23
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithIdIterator.java → hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/authorize/TimelinePolicyProvider.java

@@ -16,38 +16,29 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
-package org.apache.hadoop.hdfs.protocol;
-
-import java.io.IOException;
+package org.apache.hadoop.yarn.server.timeline.security.authorize;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.BatchedRemoteIterator;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.Service;
+import org.apache.hadoop.yarn.api.ApplicationHistoryProtocolPB;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
 
 /**
 /**
- * Used on the client-side to iterate over the list of encryption zones
- * stored on the namenode.
+ * {@link PolicyProvider} for YARN timeline server protocols.
  */
  */
 @InterfaceAudience.Private
 @InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class EncryptionZoneWithIdIterator
-    extends BatchedRemoteIterator<Long, EncryptionZoneWithId> {
-
-  private final ClientProtocol namenode;
-
-  EncryptionZoneWithIdIterator(ClientProtocol namenode) {
-    super(Long.valueOf(0));
-    this.namenode = namenode;
-  }
+@InterfaceStability.Unstable
+public class TimelinePolicyProvider extends PolicyProvider {
 
 
   @Override
   @Override
-  public BatchedEntries<EncryptionZoneWithId> makeRequest(Long prevId)
-      throws IOException {
-    return namenode.listEncryptionZones(prevId);
+  public Service[] getServices() {
+    return new Service[] {
+        new Service(
+            YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONHISTORY_PROTOCOL,
+            ApplicationHistoryProtocolPB.class)
+    };
   }
   }
 
 
-  @Override
-  public Long elementToPrevKey(EncryptionZoneWithId entry) {
-    return entry.getId();
-  }
 }
 }

+ 9 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml

@@ -102,11 +102,6 @@
       <groupId>org.apache.hadoop</groupId>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-annotations</artifactId>
       <artifactId>hadoop-annotations</artifactId>
     </dependency>
     </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-      <scope>test</scope>
-    </dependency>
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <groupId>org.apache.hadoop</groupId>
@@ -122,11 +117,20 @@
       <groupId>com.google.protobuf</groupId>
       <groupId>com.google.protobuf</groupId>
       <artifactId>protobuf-java</artifactId>
       <artifactId>protobuf-java</artifactId>
     </dependency>
     </dependency>
+    <!--
+    junit must be before mockito-all on the classpath.  mockito-all bundles its
+    own copy of the hamcrest classes, but they don't match our junit version.
+    -->
     <dependency>
     <dependency>
       <groupId>junit</groupId>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <artifactId>junit</artifactId>
       <scope>test</scope>
       <scope>test</scope>
     </dependency>
     </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
     <dependency>
       <groupId>com.google.inject</groupId>
       <groupId>com.google.inject</groupId>
       <artifactId>guice</artifactId>
       <artifactId>guice</artifactId>

+ 26 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java

@@ -57,6 +57,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
   private String cgroupMountPath;
   private String cgroupMountPath;
 
 
   private boolean cpuWeightEnabled = true;
   private boolean cpuWeightEnabled = true;
+  private boolean strictResourceUsageMode = false;
 
 
   private final String MTAB_FILE = "/proc/mounts";
   private final String MTAB_FILE = "/proc/mounts";
   private final String CGROUPS_FSTYPE = "cgroup";
   private final String CGROUPS_FSTYPE = "cgroup";
@@ -71,6 +72,8 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
   private long deleteCgroupTimeout;
   private long deleteCgroupTimeout;
   // package private for testing purposes
   // package private for testing purposes
   Clock clock;
   Clock clock;
+
+  private float yarnProcessors;
   
   
   public CgroupsLCEResourcesHandler() {
   public CgroupsLCEResourcesHandler() {
     this.controllerPaths = new HashMap<String, String>();
     this.controllerPaths = new HashMap<String, String>();
@@ -105,6 +108,12 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
       cgroupPrefix = cgroupPrefix.substring(1);
       cgroupPrefix = cgroupPrefix.substring(1);
     }
     }
 
 
+    this.strictResourceUsageMode =
+        conf
+          .getBoolean(
+            YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE,
+            YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE);
+
     int len = cgroupPrefix.length();
     int len = cgroupPrefix.length();
     if (cgroupPrefix.charAt(len - 1) == '/') {
     if (cgroupPrefix.charAt(len - 1) == '/') {
       cgroupPrefix = cgroupPrefix.substring(0, len - 1);
       cgroupPrefix = cgroupPrefix.substring(0, len - 1);
@@ -132,8 +141,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
     initializeControllerPaths();
     initializeControllerPaths();
 
 
     // cap overall usage to the number of cores allocated to YARN
     // cap overall usage to the number of cores allocated to YARN
-    float yarnProcessors =
-        NodeManagerHardwareUtils.getContainersCores(plugin, conf);
+    yarnProcessors = NodeManagerHardwareUtils.getContainersCores(plugin, conf);
     int systemProcessors = plugin.getNumProcessors();
     int systemProcessors = plugin.getNumProcessors();
     if (systemProcessors != (int) yarnProcessors) {
     if (systemProcessors != (int) yarnProcessors) {
       LOG.info("YARN containers restricted to " + yarnProcessors + " cores");
       LOG.info("YARN containers restricted to " + yarnProcessors + " cores");
@@ -290,10 +298,25 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
     String containerName = containerId.toString();
     String containerName = containerId.toString();
 
 
     if (isCpuWeightEnabled()) {
     if (isCpuWeightEnabled()) {
+      int containerVCores = containerResource.getVirtualCores();
       createCgroup(CONTROLLER_CPU, containerName);
       createCgroup(CONTROLLER_CPU, containerName);
-      int cpuShares = CPU_DEFAULT_WEIGHT * containerResource.getVirtualCores();
+      int cpuShares = CPU_DEFAULT_WEIGHT * containerVCores;
       updateCgroup(CONTROLLER_CPU, containerName, "shares",
       updateCgroup(CONTROLLER_CPU, containerName, "shares",
           String.valueOf(cpuShares));
           String.valueOf(cpuShares));
+      if (strictResourceUsageMode) {
+        int nodeVCores =
+            conf.getInt(YarnConfiguration.NM_VCORES,
+              YarnConfiguration.DEFAULT_NM_VCORES);
+        if (nodeVCores != containerVCores) {
+          float containerCPU =
+              (containerVCores * yarnProcessors) / (float) nodeVCores;
+          int[] limits = getOverallLimits(containerCPU);
+          updateCgroup(CONTROLLER_CPU, containerName, CPU_PERIOD_US,
+            String.valueOf(limits[0]));
+          updateCgroup(CONTROLLER_CPU, containerName, CPU_QUOTA_US,
+            String.valueOf(limits[1]));
+        }
+      }
     }
     }
   }
   }
 
 

+ 139 - 26
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java

@@ -18,6 +18,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.util;
 package org.apache.hadoop.yarn.server.nodemanager.util;
 
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
 import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
 import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
 import org.junit.Assert;
 import org.junit.Assert;
@@ -86,9 +88,13 @@ public class TestCgroupsLCEResourcesHandler {
 
 
     String mtabFile;
     String mtabFile;
     int[] limits = new int[2];
     int[] limits = new int[2];
+    boolean generateLimitsMode = false;
 
 
     @Override
     @Override
     int[] getOverallLimits(float x) {
     int[] getOverallLimits(float x) {
+      if (generateLimitsMode == true) {
+        return super.getOverallLimits(x);
+      }
       return limits;
      return limits;
    }
 
@@ -116,32 +122,11 @@ public class TestCgroupsLCEResourcesHandler {
    handler.initConfig();
 
    // create mock cgroup
-    if (!cgroupDir.mkdir()) {
-      String message = "Could not create dir " + cgroupDir.getAbsolutePath();
-      throw new IOException(message);
-    }
-    File cgroupMountDir = new File(cgroupDir.getAbsolutePath(), "hadoop-yarn");
-    if (!cgroupMountDir.mkdir()) {
-      String message =
-          "Could not create dir " + cgroupMountDir.getAbsolutePath();
-      throw new IOException(message);
-    }
+    File cgroupDir = createMockCgroup();
+    File cgroupMountDir = createMockCgroupMount(cgroupDir);
 
 
     // create mock mtab
     // create mock mtab
-    String mtabContent =
-        "none " + cgroupDir.getAbsolutePath() + " cgroup rw,relatime,cpu 0 0";
-    File mockMtab = new File("target", UUID.randomUUID().toString());
-    if (!mockMtab.exists()) {
-      if (!mockMtab.createNewFile()) {
-        String message = "Could not create file " + mockMtab.getAbsolutePath();
-        throw new IOException(message);
-      }
-    }
-    FileWriter mtabWriter = new FileWriter(mockMtab.getAbsoluteFile());
-    mtabWriter.write(mtabContent);
-    mtabWriter.close();
-    mockMtab.deleteOnExit();
+    File mockMtab = createMockMTab(cgroupDir);
 
 
     // setup our handler and call init()
     // setup our handler and call init()
     handler.setMtabFile(mockMtab.getAbsolutePath());
     handler.setMtabFile(mockMtab.getAbsolutePath());
@@ -156,7 +141,8 @@ public class TestCgroupsLCEResourcesHandler {
     Assert.assertFalse(quotaFile.exists());
     Assert.assertFalse(quotaFile.exists());
 
 
     // subset of cpu being used, files should be created
     // subset of cpu being used, files should be created
-    conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 75);
+    conf
+      .setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 75);
     handler.limits[0] = 100 * 1000;
     handler.limits[0] = 100 * 1000;
     handler.limits[1] = 1000 * 1000;
     handler.limits[1] = 1000 * 1000;
     handler.init(mockLCE, plugin);
     handler.init(mockLCE, plugin);
@@ -166,7 +152,8 @@ public class TestCgroupsLCEResourcesHandler {
     Assert.assertEquals(1000 * 1000, quota);
     Assert.assertEquals(1000 * 1000, quota);
 
 
     // set cpu back to 100, quota should be -1
     // set cpu back to 100, quota should be -1
-    conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 100);
+    conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT,
+      100);
     handler.limits[0] = 100 * 1000;
     handler.limits[0] = 100 * 1000;
     handler.limits[1] = 1000 * 1000;
     handler.limits[1] = 1000 * 1000;
     handler.init(mockLCE, plugin);
     handler.init(mockLCE, plugin);
@@ -213,4 +200,130 @@ public class TestCgroupsLCEResourcesHandler {
     Assert.assertEquals(1000 * 1000, ret[0]);
     Assert.assertEquals(1000 * 1000, ret[0]);
     Assert.assertEquals(-1, ret[1]);
     Assert.assertEquals(-1, ret[1]);
   }
   }
+
+  private File createMockCgroup() throws IOException {
+    File cgroupDir = new File("target", UUID.randomUUID().toString());
+    if (!cgroupDir.mkdir()) {
+      String message = "Could not create dir " + cgroupDir.getAbsolutePath();
+      throw new IOException(message);
+    }
+    return cgroupDir;
+  }
+
+  private File createMockCgroupMount(File cgroupDir) throws IOException {
+    File cgroupMountDir = new File(cgroupDir.getAbsolutePath(), "hadoop-yarn");
+    if (!cgroupMountDir.mkdir()) {
+      String message =
+          "Could not create dir " + cgroupMountDir.getAbsolutePath();
+      throw new IOException(message);
+    }
+    return cgroupMountDir;
+  }
+
+  private File createMockMTab(File cgroupDir) throws IOException {
+    String mtabContent =
+        "none " + cgroupDir.getAbsolutePath() + " cgroup rw,relatime,cpu 0 0";
+    File mockMtab = new File("target", UUID.randomUUID().toString());
+    if (!mockMtab.exists()) {
+      if (!mockMtab.createNewFile()) {
+        String message = "Could not create file " + mockMtab.getAbsolutePath();
+        throw new IOException(message);
+      }
+    }
+    FileWriter mtabWriter = new FileWriter(mockMtab.getAbsoluteFile());
+    mtabWriter.write(mtabContent);
+    mtabWriter.close();
+    mockMtab.deleteOnExit();
+    return mockMtab;
+  }
+
+  @Test
+  public void testContainerLimits() throws IOException {
+    LinuxContainerExecutor mockLCE = new MockLinuxContainerExecutor();
+    CustomCgroupsLCEResourceHandler handler =
+        new CustomCgroupsLCEResourceHandler();
+    handler.generateLimitsMode = true;
+    YarnConfiguration conf = new YarnConfiguration();
+    final int numProcessors = 4;
+    ResourceCalculatorPlugin plugin =
+        Mockito.mock(ResourceCalculatorPlugin.class);
+    Mockito.doReturn(numProcessors).when(plugin).getNumProcessors();
+    handler.setConf(conf);
+    handler.initConfig();
+
+    // create mock cgroup
+    File cgroupDir = createMockCgroup();
+    File cgroupMountDir = createMockCgroupMount(cgroupDir);
+
+    // create mock mtab
+    File mockMtab = createMockMTab(cgroupDir);
+
+    // setup our handler and call init()
+    handler.setMtabFile(mockMtab.getAbsolutePath());
+    handler.init(mockLCE, plugin);
+
+    // check values
+    // default case - files shouldn't exist, strict mode off by default
+    ContainerId id = ContainerId.fromString("container_1_1_1_1");
+    handler.preExecute(id, Resource.newInstance(1024, 1));
+    File containerDir = new File(cgroupMountDir, id.toString());
+    Assert.assertTrue(containerDir.exists());
+    Assert.assertTrue(containerDir.isDirectory());
+    File periodFile = new File(containerDir, "cpu.cfs_period_us");
+    File quotaFile = new File(containerDir, "cpu.cfs_quota_us");
+    Assert.assertFalse(periodFile.exists());
+    Assert.assertFalse(quotaFile.exists());
+
+    // no files created because we're using all cpu
+    FileUtils.deleteQuietly(containerDir);
+    conf.setBoolean(
+      YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, true);
+    handler.initConfig();
+    handler.preExecute(id,
+      Resource.newInstance(1024, YarnConfiguration.DEFAULT_NM_VCORES));
+    Assert.assertTrue(containerDir.exists());
+    Assert.assertTrue(containerDir.isDirectory());
+    periodFile = new File(containerDir, "cpu.cfs_period_us");
+    quotaFile = new File(containerDir, "cpu.cfs_quota_us");
+    Assert.assertFalse(periodFile.exists());
+    Assert.assertFalse(quotaFile.exists());
+
+    // 50% of CPU
+    FileUtils.deleteQuietly(containerDir);
+    conf.setBoolean(
+      YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, true);
+    handler.initConfig();
+    handler.preExecute(id,
+      Resource.newInstance(1024, YarnConfiguration.DEFAULT_NM_VCORES / 2));
+    Assert.assertTrue(containerDir.exists());
+    Assert.assertTrue(containerDir.isDirectory());
+    periodFile = new File(containerDir, "cpu.cfs_period_us");
+    quotaFile = new File(containerDir, "cpu.cfs_quota_us");
+    Assert.assertTrue(periodFile.exists());
+    Assert.assertTrue(quotaFile.exists());
+    Assert.assertEquals(500 * 1000, readIntFromFile(periodFile));
+    Assert.assertEquals(1000 * 1000, readIntFromFile(quotaFile));
+
+    // CGroups set to 50% of CPU, container set to 50% of YARN CPU
+    FileUtils.deleteQuietly(containerDir);
+    conf.setBoolean(
+      YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, true);
+    conf
+      .setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 50);
+    handler.initConfig();
+    handler.init(mockLCE, plugin);
+    handler.preExecute(id,
+      Resource.newInstance(1024, YarnConfiguration.DEFAULT_NM_VCORES / 2));
+    Assert.assertTrue(containerDir.exists());
+    Assert.assertTrue(containerDir.isDirectory());
+    periodFile = new File(containerDir, "cpu.cfs_period_us");
+    quotaFile = new File(containerDir, "cpu.cfs_quota_us");
+    Assert.assertTrue(periodFile.exists());
+    Assert.assertTrue(quotaFile.exists());
+    Assert.assertEquals(1000 * 1000, readIntFromFile(periodFile));
+    Assert.assertEquals(1000 * 1000, readIntFromFile(quotaFile));
+
+    FileUtils.deleteQuietly(cgroupDir);
+  }
+
 }