
Merge from trunk to HDFS-2006 branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2006@1593948 13f79535-47bb-0310-9956-ffa450edef68
Uma Maheswara Rao G, 11 years ago
Parent commit: 6e55f3b4ba
100 changed files with 6325 additions and 1095 deletions
  1. + 1 - 0  .gitignore
  2. + 52 - 0  hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml
  3. + 0 - 28  hadoop-client/pom.xml
  4. + 56 - 36  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
  5. + 14 - 3  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
  6. + 69 - 2  hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
  7. + 16 - 0  hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java
  8. + 43 - 0  hadoop-common-project/hadoop-common/CHANGES.txt
  9. + 6 - 0  hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  10. + 0 - 20  hadoop-common-project/hadoop-common/pom.xml
  11. + 5 - 3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  12. + 519 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
  13. + 53 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java
  14. + 3 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  15. + 2 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  16. + 3 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathIOException.java
  17. + 10 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
  18. + 56 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMappingWithDependency.java
  19. + 16 - 6  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
  20. + 178 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMappingWithDependency.java
  21. + 1 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPropertiesResolver.java
  22. + 3 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  23. + 64 - 24  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java
  24. + 15 - 23  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
  25. + 1 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java
  26. + 1 - 0  hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.crypto.key.KeyProviderFactory
  27. + 8 - 0  hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  28. + 15 - 5  hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm
  29. + 20 - 2  hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
  30. + 23 - 1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathExceptions.java
  31. + 0 - 7  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
  32. + 86 - 0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMappingWithDependency.java
  33. + 70 - 1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java
  34. + 18 - 13  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
  35. + 41 - 0  hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
  36. + 408 - 0  hadoop-common-project/hadoop-kms/pom.xml
  37. + 82 - 0  hadoop-common-project/hadoop-kms/src/main/conf/kms-acls.xml
  38. + 45 - 0  hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
  39. + 38 - 0  hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
  40. + 71 - 0  hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml
  41. + 305 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
  42. + 133 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
  43. + 62 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java
  44. + 123 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
  45. + 180 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSCacheKeyProvider.java
  46. + 94 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
  47. + 113 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
  48. + 54 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONReader.java
  49. + 70 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java
  50. + 92 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java
  51. + 80 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSServerJSONUtils.java
  52. + 214 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
  53. + 181 - 0  hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh
  54. + 60 - 0  hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
  55. + 2 - 3  hadoop-common-project/hadoop-kms/src/main/tomcat/ROOT/WEB-INF/web.xml
  56. + 16 - 6  hadoop-common-project/hadoop-kms/src/main/tomcat/ROOT/index.html
  57. + 67 - 0  hadoop-common-project/hadoop-kms/src/main/tomcat/logging.properties
  58. + 153 - 0  hadoop-common-project/hadoop-kms/src/main/tomcat/server.xml
  59. + 135 - 0  hadoop-common-project/hadoop-kms/src/main/tomcat/ssl-server.xml
  60. + 78 - 0  hadoop-common-project/hadoop-kms/src/main/webapp/WEB-INF/web.xml
  61. + 487 - 0  hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
  62. + 29 - 0  hadoop-common-project/hadoop-kms/src/site/resources/css/site.css
  63. + 17 - 5  hadoop-common-project/hadoop-kms/src/site/site.xml
  64. + 806 - 0  hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
  65. + 47 - 0  hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSACLs.java
  66. + 120 - 0  hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSCacheKeyProvider.java
  67. + 31 - 0  hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
  68. + 25 - 12  hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
  69. + 1 - 0  hadoop-common-project/pom.xml
  70. + 1 - 0  hadoop-dist/pom.xml
  71. + 0 - 48  hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
  72. + 0 - 10  hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
  73. + 45 - 0  hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  74. + 6 - 155  hadoop-hdfs-project/hadoop-hdfs/pom.xml
  75. + 20 - 20  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  76. + 2 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
  77. + 8 - 6  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  78. + 5 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
  79. + 4 - 4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
  80. + 17 - 4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  81. + 19 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
  82. + 2 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
  83. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
  84. + 5 - 4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  85. + 12 - 8  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  86. + 2 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  87. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  88. + 9 - 2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  89. + 5 - 3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
  90. + 7 - 3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
  91. + 35 - 4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
  92. + 72 - 8  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
  93. + 38 - 2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
  94. + 21 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java
  95. + 10 - 562  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
  96. + 18 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
  97. + 40 - 23  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
  98. + 5 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
  99. + 19 - 12  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  100. + 9 - 4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

+ 1 - 0
.gitignore

@@ -7,5 +7,6 @@
 .project
 .settings
 target
+hadoop-common-project/hadoop-kms/downloads/
 hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads

+ 52 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml

@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<assembly>
+  <id>hadoop-kms-dist</id>
+  <formats>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <fileSets>
+    <!-- Configuration files -->
+    <fileSet>
+      <directory>${basedir}/src/main/conf</directory>
+      <outputDirectory>/etc/hadoop</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/sbin</directory>
+      <outputDirectory>/sbin</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/libexec</directory>
+      <outputDirectory>/libexec</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <!-- Documentation -->
+    <fileSet>
+      <directory>${project.build.directory}/site</directory>
+      <outputDirectory>/share/doc/hadoop/kms</outputDirectory>
+    </fileSet>
+  </fileSets>
+</assembly>

+ 0 - 28
hadoop-client/pom.xml

@@ -39,22 +39,10 @@
       <artifactId>hadoop-common</artifactId>
       <scope>compile</scope>
       <exclusions>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-compiler</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-runtime</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>javax.servlet</groupId>
           <artifactId>servlet-api</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>javax.servlet.jsp</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>commons-logging</groupId>
           <artifactId>commons-logging-api</artifactId>
@@ -71,10 +59,6 @@
           <groupId>org.mortbay.jetty</groupId>
           <artifactId>jetty-util</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jsp-api-2.1</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>org.mortbay.jetty</groupId>
           <artifactId>servlet-api-2.5</artifactId>
@@ -111,10 +95,6 @@
           <groupId>com.jcraft</groupId>
           <artifactId>jsch</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>commons-el</groupId>
-          <artifactId>commons-el</artifactId>
-        </exclusion>
       </exclusions>
     </dependency>
 
@@ -147,14 +127,6 @@
           <groupId>javax.servlet</groupId>
           <artifactId>servlet-api</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>javax.servlet.jsp</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-runtime</artifactId>
-        </exclusion>
       </exclusions>
     </dependency>
 

+ 56 - 36
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java

@@ -34,16 +34,18 @@ import javax.security.auth.login.LoginException;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+
 import java.io.File;
 import java.io.IOException;
-import java.security.Principal;
 import java.security.PrivilegedActionException;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
+import java.util.regex.Pattern;
 
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
@@ -140,10 +142,10 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
    */
   public static final String NAME_RULES = TYPE + ".name.rules";
 
-  private String principal;
   private String keytab;
   private GSSManager gssManager;
-  private LoginContext loginContext;
+  private Subject serverSubject = new Subject();
+  private List<LoginContext> loginContexts = new ArrayList<LoginContext>();
 
   /**
    * Initializes the authentication handler instance.
@@ -159,7 +161,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
   @Override
   public void init(Properties config) throws ServletException {
     try {
-      principal = config.getProperty(PRINCIPAL, principal);
+      String principal = config.getProperty(PRINCIPAL);
       if (principal == null || principal.trim().length() == 0) {
         throw new ServletException("Principal not defined in configuration");
       }
@@ -170,23 +172,40 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       if (!new File(keytab).exists()) {
         throw new ServletException("Keytab does not exist: " + keytab);
       }
+      
+      // use all SPNEGO principals in the keytab if a principal isn't
+      // specifically configured
+      final String[] spnegoPrincipals;
+      if (principal.equals("*")) {
+        spnegoPrincipals = KerberosUtil.getPrincipalNames(
+            keytab, Pattern.compile("HTTP/.*"));
+        if (spnegoPrincipals.length == 0) {
+          throw new ServletException("Principals do not exist in the keytab");
+        }
+      } else {
+        spnegoPrincipals = new String[]{principal};
+      }
 
       String nameRules = config.getProperty(NAME_RULES, null);
       if (nameRules != null) {
         KerberosName.setRules(nameRules);
       }
       
-      Set<Principal> principals = new HashSet<Principal>();
-      principals.add(new KerberosPrincipal(principal));
-      Subject subject = new Subject(false, principals, new HashSet<Object>(), new HashSet<Object>());
-
-      KerberosConfiguration kerberosConfiguration = new KerberosConfiguration(keytab, principal);
-
-      LOG.info("Login using keytab "+keytab+", for principal "+principal);
-      loginContext = new LoginContext("", subject, null, kerberosConfiguration);
-      loginContext.login();
-
-      Subject serverSubject = loginContext.getSubject();
+      for (String spnegoPrincipal : spnegoPrincipals) {
+        LOG.info("Login using keytab {}, for principal {}",
+            keytab, principal);
+        final KerberosConfiguration kerberosConfiguration =
+            new KerberosConfiguration(keytab, spnegoPrincipal);
+        final LoginContext loginContext =
+            new LoginContext("", serverSubject, null, kerberosConfiguration);
+        try {
+          loginContext.login();
+        } catch (LoginException le) {
+          LOG.warn("Failed to login as [{}]", spnegoPrincipal, le);
+          throw new AuthenticationException(le);          
+        }
+        loginContexts.add(loginContext);
+      }
       try {
         gssManager = Subject.doAs(serverSubject, new PrivilegedExceptionAction<GSSManager>() {
 
@@ -198,7 +217,6 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       } catch (PrivilegedActionException ex) {
         throw ex.getException();
       }
-      LOG.info("Initialized, principal [{}] from keytab [{}]", principal, keytab);
     } catch (Exception ex) {
       throw new ServletException(ex);
     }
@@ -211,14 +229,16 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
    */
   @Override
   public void destroy() {
-    try {
-      if (loginContext != null) {
+    keytab = null;
+    serverSubject = null;
+    for (LoginContext loginContext : loginContexts) {
+      try {
         loginContext.logout();
-        loginContext = null;
+      } catch (LoginException ex) {
+        LOG.warn(ex.getMessage(), ex);
       }
-    } catch (LoginException ex) {
-      LOG.warn(ex.getMessage(), ex);
     }
+    loginContexts.clear();
   }
 
   /**
@@ -233,12 +253,12 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
   }
 
   /**
-   * Returns the Kerberos principal used by the authentication handler.
+   * Returns the Kerberos principals used by the authentication handler.
    *
-   * @return the Kerberos principal used by the authentication handler.
+   * @return the Kerberos principals used by the authentication handler.
    */
-  protected String getPrincipal() {
-    return principal;
+  protected Set<KerberosPrincipal> getPrincipals() {
+    return serverSubject.getPrincipals(KerberosPrincipal.class);
   }
 
   /**
@@ -304,7 +324,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       authorization = authorization.substring(KerberosAuthenticator.NEGOTIATE.length()).trim();
       final Base64 base64 = new Base64(0);
       final byte[] clientToken = base64.decode(authorization);
-      Subject serverSubject = loginContext.getSubject();
+      final String serverName = request.getServerName();
       try {
         token = Subject.doAs(serverSubject, new PrivilegedExceptionAction<AuthenticationToken>() {
 
@@ -314,15 +334,15 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
             GSSContext gssContext = null;
             GSSCredential gssCreds = null;
             try {
-              if (IBM_JAVA) {
-                // IBM JDK needs non-null credentials to be passed to createContext here, with
-                // SPNEGO mechanism specified, otherwise JGSS will use its default mechanism
-                // only, which is Kerberos V5.
-                gssCreds = gssManager.createCredential(null, GSSCredential.INDEFINITE_LIFETIME,
-                    new Oid[]{KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
-                        KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID")},
-                    GSSCredential.ACCEPT_ONLY);
-              }
+              gssCreds = gssManager.createCredential(
+                  gssManager.createName(
+                      KerberosUtil.getServicePrincipal("HTTP", serverName),
+                      KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
+                  GSSCredential.INDEFINITE_LIFETIME,
+                  new Oid[]{
+                    KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
+                    KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID")},
+                  GSSCredential.ACCEPT_ONLY);
               gssContext = gssManager.createContext(gssCreds);
               byte[] serverToken = gssContext.acceptSecContext(clientToken, 0, clientToken.length);
               if (serverToken != null && serverToken.length > 0) {
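 
The rewrite above lets the handler accept a wildcard principal: when KerberosAuthenticationHandler.PRINCIPAL is set to "*", every HTTP/* entry found in the keytab is logged into the shared server Subject instead of a single SPN. A minimal sketch of initializing the handler that way, mirroring the new testDynamicPrincipalDiscovery test further down; the keytab path is illustrative:

    import java.util.Properties;

    import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;

    public class WildcardSpnegoSketch {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Keytab path is illustrative; it must contain one or more HTTP/<host> entries.
        props.setProperty(KerberosAuthenticationHandler.KEYTAB,
            "/etc/security/keytabs/spnego.service.keytab");
        // "*" asks the handler to log in every HTTP/* principal found in the keytab.
        props.setProperty(KerberosAuthenticationHandler.PRINCIPAL, "*");

        KerberosAuthenticationHandler handler = new KerberosAuthenticationHandler();
        handler.init(props);   // throws ServletException if no HTTP/* principal exists
        // ... handler.authenticate(request, response) now accepts any of those SPNs
        handler.destroy();
      }
    }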

+ 14 - 3
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.security.authentication.util;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Locale;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -66,7 +67,7 @@ public class KerberosName {
    */
   private static final Pattern ruleParser =
     Pattern.compile("\\s*((DEFAULT)|(RULE:\\[(\\d*):([^\\]]*)](\\(([^)]*)\\))?"+
-                    "(s/([^/]*)/([^/]*)/(g)?)?))");
+                    "(s/([^/]*)/([^/]*)/(g)?)?))/?(L)?");
 
   /**
    * A pattern that recognizes simple/non-simple names.
@@ -171,6 +172,7 @@ public class KerberosName {
     private final Pattern fromPattern;
     private final String toPattern;
     private final boolean repeat;
+    private final boolean toLowerCase;
 
     Rule() {
       isDefault = true;
@@ -180,10 +182,11 @@ public class KerberosName {
       fromPattern = null;
       toPattern = null;
       repeat = false;
+      toLowerCase = false;
     }
 
     Rule(int numOfComponents, String format, String match, String fromPattern,
-         String toPattern, boolean repeat) {
+         String toPattern, boolean repeat, boolean toLowerCase) {
       isDefault = false;
       this.numOfComponents = numOfComponents;
       this.format = format;
@@ -192,6 +195,7 @@ public class KerberosName {
         fromPattern == null ? null : Pattern.compile(fromPattern);
       this.toPattern = toPattern;
       this.repeat = repeat;
+      this.toLowerCase = toLowerCase;
     }
 
     @Override
@@ -220,6 +224,9 @@ public class KerberosName {
             buf.append('g');
           }
         }
+        if (toLowerCase) {
+          buf.append("/L");
+        }
       }
       return buf.toString();
     }
@@ -308,6 +315,9 @@ public class KerberosName {
         throw new NoMatchingRule("Non-simple name " + result +
                                  " after auth_to_local rule " + this);
       }
+      if (toLowerCase && result != null) {
+        result = result.toLowerCase(Locale.ENGLISH);
+      }
       return result;
     }
   }
@@ -328,7 +338,8 @@ public class KerberosName {
                             matcher.group(7),
                             matcher.group(9),
                             matcher.group(10),
-                            "g".equals(matcher.group(11))));
+                            "g".equals(matcher.group(11)),
+                            "L".equals(matcher.group(12))));
       }
       remaining = remaining.substring(matcher.end());
     }
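 
The extended rule grammar above accepts a trailing /L that lower-cases the translated short name. A small sketch of setting such rules, modelled on the new TestKerberosName case below; the rules and principal names are illustrative:

    import org.apache.hadoop.security.authentication.util.KerberosName;

    public class AuthToLocalLowerCaseSketch {
      public static void main(String[] args) throws Exception {
        // A trailing /L forces the short name produced by the rule to lower case
        // (rules and principals are illustrative, taken from the new test).
        KerberosName.setRules(
            "RULE:[1:$1]/L\n" +
            "RULE:[2:$1]/L\n" +
            "DEFAULT");
        System.out.println(new KerberosName("Joe@FOO.COM").getShortName());      // joe
        System.out.println(new KerberosName("Joe/root@FOO.COM").getShortName()); // joe
      }
    }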

+ 69 - 2
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java

@@ -18,6 +18,7 @@ import org.apache.hadoop.security.authentication.KerberosTestUtils;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.ietf.jgss.GSSContext;
@@ -30,10 +31,18 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.ietf.jgss.Oid;
 
+import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosPrincipal;
+import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+
 import java.io.File;
+import java.security.Principal;
+import java.util.Arrays;
+import java.util.List;
 import java.util.Properties;
+import java.util.Set;
 import java.util.concurrent.Callable;
 
 public class TestKerberosAuthenticationHandler
@@ -110,8 +119,65 @@ public class TestKerberosAuthenticationHandler
 
   @Test(timeout=60000)
   public void testInit() throws Exception {
-    Assert.assertEquals(KerberosTestUtils.getServerPrincipal(), handler.getPrincipal());
     Assert.assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab());
+    Set<KerberosPrincipal> principals = handler.getPrincipals();
+    Principal expectedPrincipal =
+        new KerberosPrincipal(KerberosTestUtils.getServerPrincipal());
+    Assert.assertTrue(principals.contains(expectedPrincipal));
+    Assert.assertEquals(1, principals.size());
+  }
+
+  // dynamic configuration of HTTP principals
+  @Test(timeout=60000)
+  public void testDynamicPrincipalDiscovery() throws Exception {
+    String[] keytabUsers = new String[]{
+        "HTTP/host1", "HTTP/host2", "HTTP2/host1", "XHTTP/host"
+    };
+    String keytab = KerberosTestUtils.getKeytabFile();
+    getKdc().createPrincipal(new File(keytab), keytabUsers);
+
+    // destroy handler created in setUp()
+    handler.destroy();
+    Properties props = new Properties();
+    props.setProperty(KerberosAuthenticationHandler.KEYTAB, keytab);
+    props.setProperty(KerberosAuthenticationHandler.PRINCIPAL, "*");
+    handler = getNewAuthenticationHandler();
+    handler.init(props);
+
+    Assert.assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab());    
+    
+    Set<KerberosPrincipal> loginPrincipals = handler.getPrincipals();
+    for (String user : keytabUsers) {
+      Principal principal = new KerberosPrincipal(
+          user + "@" + KerberosTestUtils.getRealm());
+      boolean expected = user.startsWith("HTTP/");
+      Assert.assertEquals("checking for "+user, expected, 
+          loginPrincipals.contains(principal));
+    }
+  }
+
+  // dynamic configuration of HTTP principals
+  @Test(timeout=60000)
+  public void testDynamicPrincipalDiscoveryMissingPrincipals() throws Exception {
+    String[] keytabUsers = new String[]{"hdfs/localhost"};
+    String keytab = KerberosTestUtils.getKeytabFile();
+    getKdc().createPrincipal(new File(keytab), keytabUsers);
+
+    // destroy handler created in setUp()
+    handler.destroy();
+    Properties props = new Properties();
+    props.setProperty(KerberosAuthenticationHandler.KEYTAB, keytab);
+    props.setProperty(KerberosAuthenticationHandler.PRINCIPAL, "*");
+    handler = getNewAuthenticationHandler();
+    try {
+      handler.init(props);
+      Assert.fail("init should have failed");
+    } catch (ServletException ex) {
+      Assert.assertEquals("Principals do not exist in the keytab",
+          ex.getCause().getMessage());
+    } catch (Throwable t) {
+      Assert.fail("wrong exception: "+t);
+    }
   }
 
   @Test(timeout=60000)
@@ -190,7 +256,8 @@ public class TestKerberosAuthenticationHandler
 
     Mockito.when(request.getHeader(KerberosAuthenticator.AUTHORIZATION))
       .thenReturn(KerberosAuthenticator.NEGOTIATE + " " + token);
-
+    Mockito.when(request.getServerName()).thenReturn("localhost");
+    
     AuthenticationToken authToken = handler.authenticate(request, response);
 
     if (authToken != null) {

+ 16 - 0
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java

@@ -91,6 +91,22 @@ public class TestKerberosName {
     checkBadTranslation("root/joe@FOO.COM");
   }
 
+  @Test
+  public void testToLowerCase() throws Exception {
+    String rules =
+        "RULE:[1:$1]/L\n" +
+        "RULE:[2:$1]/L\n" +
+        "RULE:[2:$1;$2](^.*;admin$)s/;admin$///L\n" +
+        "RULE:[2:$1;$2](^.*;guest$)s/;guest$//g/L\n" +
+        "DEFAULT";
+    KerberosName.setRules(rules);
+    KerberosName.printRules();
+    checkTranslation("Joe@FOO.COM", "joe");
+    checkTranslation("Joe/root@FOO.COM", "joe");
+    checkTranslation("Joe/admin@FOO.COM", "joe");
+    checkTranslation("Joe/guestguest@FOO.COM", "joe");
+  }
+
   @After
   public void clear() {
     System.clearProperty("java.security.krb5.realm");

+ 43 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -8,6 +8,8 @@ Trunk (Unreleased)
     FSDataOutputStream.sync() and Syncable.sync().  (szetszwo)
 
   NEW FEATURES
+
+    HADOOP-10433. Key Management Server based on KeyProvider API. (tucu)
     
   IMPROVEMENTS
 
@@ -144,6 +146,8 @@ Trunk (Unreleased)
     HADOOP-10534. KeyProvider getKeysMetadata should take a list of names 
     rather than returning all keys. (omalley)
 
+    HADOOP-10563. Remove the dependency of jsp in trunk. (wheat9)
+
   BUG FIXES
 
     HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -361,6 +365,21 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10322. Add ability to read principal names from a keytab.
     (Benoy Antony and Daryn Sharp via kihwal)
 
+    HADOOP-10549. MAX_SUBST and varPat should be final in Configuration.java.
+    (Gera Shegalov via cnauroth)
+
+    HADOOP-10471. Reduce the visibility of constants in ProxyUsers.
+    (Benoy Antony via wheat9)
+
+    HADOOP-10556. Add toLowerCase support to auth_to_local rules 
+    for service name. (tucu)
+
+    HADOOP-10467. Enable proxyuser specification to support list of users in
+    addition to list of groups (Benoy Antony via Arpit Agarwal)
+
+    HADOOP-10158. SPNEGO should work with multiple interfaces/SPNs.
+    (daryn via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES 
@@ -422,6 +441,30 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10540. Datanode upgrade in Windows fails with hardlink error.
     (Chris Nauroth and Arpit Agarwal)
 
+    HADOOP-10508. RefreshCallQueue fails when authorization is enabled.
+    (Chris Li via wheat9)
+
+    HADOOP-10547. Give SaslPropertiesResolver.getDefaultProperties() public
+    scope. (Benoy Antony via Arpit Agarwal)
+
+    HADOOP-10543. RemoteException's unwrapRemoteException method failed for
+    PathIOException. (Yongjun Zhang via atm)
+
+    HADOOP-10562. Namenode exits on exception without printing stack trace
+    in AbstractDelegationTokenSecretManager. (Arpit Agarwal)
+
+    HADOOP-10568. Add s3 server-side encryption. (David S. Wang via atm)
+
+    HADOOP-10541. InputStream in MiniKdc#initKDCServer for minikdc.ldiff is not
+    closed. (Swarnim Kulkarni via cnauroth)
+
+    HADOOP-10517. InputStream is not closed in two methods of JarFinder.
+    (Ted Yu via cnauroth)
+
+    HADOOP-10581. TestUserGroupInformation#testGetServerSideGroups fails
+    because groups stored in Set and ArrayList are compared. 
+    (Mit Desai via kihwal)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 6 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -357,4 +357,10 @@
        <Bug code="NP" />
      </Match>
 
+  <Match>
+    <Class name="org.apache.hadoop.crypto.key.kms.KMSClientProvider"/>
+    <Method name="validateResponse"/>
+    <Bug pattern="REC_CATCH_EXCEPTION"/>
+  </Match>
+
 </FindBugsFilter>

+ 0 - 20
hadoop-common-project/hadoop-common/pom.xml

@@ -119,26 +119,6 @@
       <artifactId>jersey-server</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>tomcat</groupId>
-      <artifactId>jasper-compiler</artifactId>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>tomcat</groupId>
-      <artifactId>jasper-runtime</artifactId>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>javax.servlet.jsp</groupId>
-      <artifactId>jsp-api</artifactId>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>commons-el</groupId>
-      <artifactId>commons-el</artifactId>
-      <scope>runtime</scope>
-    </dependency>
     <dependency>
       <groupId>commons-logging</groupId>
       <artifactId>commons-logging</artifactId>

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -797,14 +797,16 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     reloadConfiguration();
   }
   
-  private static Pattern varPat = Pattern.compile("\\$\\{[^\\}\\$\u0020]+\\}");
-  private static int MAX_SUBST = 20;
+  private static final Pattern VAR_PATTERN =
+      Pattern.compile("\\$\\{[^\\}\\$\u0020]+\\}");
+
+  private static final int MAX_SUBST = 20;
 
   private String substituteVars(String expr) {
     if (expr == null) {
       return null;
     }
-    Matcher match = varPat.matcher("");
+    Matcher match = VAR_PATTERN.matcher("");
     String eval = expr;
     Set<String> evalSet = new HashSet<String>();
     for(int s=0; s<MAX_SUBST; s++) {
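 
The constants made final above drive Configuration's ${var} expansion: VAR_PATTERN matches ${...} references and MAX_SUBST caps the number of nested substitutions at 20. A brief illustrative sketch; the property names are made up:

    import org.apache.hadoop.conf.Configuration;

    public class ConfigurationSubstitutionSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);   // skip default resources
        conf.set("base.dir", "/data");
        conf.set("log.dir", "${base.dir}/logs");         // ${...} matches VAR_PATTERN
        // get() expands variables recursively, up to MAX_SUBST (20) substitutions.
        System.out.println(conf.get("log.dir"));         // prints /data/logs
      }
    }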

+ 519 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java

@@ -0,0 +1,519 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.http.client.utils.URIBuilder;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import javax.net.ssl.HttpsURLConnection;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.lang.reflect.Constructor;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.net.URLEncoder;
+import java.security.GeneralSecurityException;
+import java.security.NoSuchAlgorithmException;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * KMS client <code>KeyProvider</code> implementation.
+ */
+@InterfaceAudience.Private
+public class KMSClientProvider extends KeyProvider {
+
+  public static final String SCHEME_NAME = "kms";
+
+  private static final String UTF8 = "UTF-8";
+
+  private static final String CONTENT_TYPE = "Content-Type";
+  private static final String APPLICATION_JSON_MIME = "application/json";
+
+  private static final String HTTP_GET = "GET";
+  private static final String HTTP_POST = "POST";
+  private static final String HTTP_PUT = "PUT";
+  private static final String HTTP_DELETE = "DELETE";
+
+  private static KeyVersion parseJSONKeyVersion(Map valueMap) {
+    KeyVersion keyVersion = null;
+    if (!valueMap.isEmpty()) {
+      byte[] material = (valueMap.containsKey(KMSRESTConstants.MATERIAL_FIELD))
+          ? Base64.decodeBase64((String) valueMap.get(KMSRESTConstants.MATERIAL_FIELD))
+          : null;
+      keyVersion = new KMSKeyVersion((String)
+          valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD), material);
+    }
+    return keyVersion;
+  }
+
+  private static Metadata parseJSONMetadata(Map valueMap) {
+    Metadata metadata = null;
+    if (!valueMap.isEmpty()) {
+      metadata = new KMSMetadata(
+          (String) valueMap.get(KMSRESTConstants.CIPHER_FIELD),
+          (Integer) valueMap.get(KMSRESTConstants.LENGTH_FIELD),
+          (String) valueMap.get(KMSRESTConstants.DESCRIPTION_FIELD),
+          new Date((Long) valueMap.get(KMSRESTConstants.CREATED_FIELD)),
+          (Integer) valueMap.get(KMSRESTConstants.VERSIONS_FIELD));
+    }
+    return metadata;
+  }
+
+  private static void writeJson(Map map, OutputStream os) throws IOException {
+    Writer writer = new OutputStreamWriter(os);
+    ObjectMapper jsonMapper = new ObjectMapper();
+    jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, map);
+  }
+
+  /**
+   * The factory to create KMSClientProvider, which is used by the
+   * ServiceLoader.
+   */
+  public static class Factory extends KeyProviderFactory {
+
+    @Override
+    public KeyProvider createProvider(URI providerName, Configuration conf)
+        throws IOException {
+      if (SCHEME_NAME.equals(providerName.getScheme())) {
+        return new KMSClientProvider(providerName, conf);
+      }
+      return null;
+    }
+  }
+
+  public static <T> T checkNotNull(T o, String name)
+      throws IllegalArgumentException {
+    if (o == null) {
+      throw new IllegalArgumentException("Parameter '" + name +
+          "' cannot be null");
+    }
+    return o;
+  }
+
+
+  public static String checkNotEmpty(String s, String name)
+      throws IllegalArgumentException {
+    checkNotNull(s, name);
+    if (s.isEmpty()) {
+      throw new IllegalArgumentException("Parameter '" + name +
+          "' cannot be empty");
+    }
+    return s;
+  }
+
+  private String kmsUrl;
+  private SSLFactory sslFactory;
+
+  public KMSClientProvider(URI uri, Configuration conf) throws IOException {
+    Path path = unnestUri(uri);
+    URL url = path.toUri().toURL();
+    kmsUrl = createServiceURL(url);
+    if ("https".equalsIgnoreCase(url.getProtocol())) {
+      sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+      try {
+        sslFactory.init();
+      } catch (GeneralSecurityException ex) {
+        throw new IOException(ex);
+      }
+    }
+  }
+
+  private String createServiceURL(URL url) throws IOException {
+    String str = url.toExternalForm();
+    if (str.endsWith("/")) {
+      str = str.substring(0, str.length() - 1);
+    }
+    return new URL(str + KMSRESTConstants.SERVICE_VERSION + "/").
+        toExternalForm();
+  }
+
+  private URL createURL(String collection, String resource, String subResource,
+      Map<String, ?> parameters) throws IOException {
+    try {
+      StringBuilder sb = new StringBuilder();
+      sb.append(kmsUrl);
+      sb.append(collection);
+      if (resource != null) {
+        sb.append("/").append(URLEncoder.encode(resource, UTF8));
+      }
+      if (subResource != null) {
+        sb.append("/").append(subResource);
+      }
+      URIBuilder uriBuilder = new URIBuilder(sb.toString());
+      if (parameters != null) {
+        for (Map.Entry<String, ?> param : parameters.entrySet()) {
+          Object value = param.getValue();
+          if (value instanceof String) {
+            uriBuilder.addParameter(param.getKey(), (String) value);
+          } else {
+            for (String s : (String[]) value) {
+              uriBuilder.addParameter(param.getKey(), s);
+            }
+          }
+        }
+      }
+      return uriBuilder.build().toURL();
+    } catch (URISyntaxException ex) {
+      throw new IOException(ex);
+    }
+  }
+
+  private HttpURLConnection configureConnection(HttpURLConnection conn)
+      throws IOException {
+    if (sslFactory != null) {
+      HttpsURLConnection httpsConn = (HttpsURLConnection) conn;
+      try {
+        httpsConn.setSSLSocketFactory(sslFactory.createSSLSocketFactory());
+      } catch (GeneralSecurityException ex) {
+        throw new IOException(ex);
+      }
+      httpsConn.setHostnameVerifier(sslFactory.getHostnameVerifier());
+    }
+    return conn;
+  }
+
+  private HttpURLConnection createConnection(URL url, String method)
+      throws IOException {
+    HttpURLConnection conn;
+    try {
+      AuthenticatedURL authUrl = new AuthenticatedURL(new PseudoAuthenticator(),
+          sslFactory);
+      conn = authUrl.openConnection(url, new AuthenticatedURL.Token());
+    } catch (AuthenticationException ex) {
+      throw new IOException(ex);
+    }
+    conn.setUseCaches(false);
+    conn.setRequestMethod(method);
+    if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) {
+      conn.setDoOutput(true);
+    }
+    conn = configureConnection(conn);
+    return conn;
+  }
+
+  // trick, riding on generics to throw an undeclared exception
+
+  private static void throwEx(Throwable ex) {
+    KMSClientProvider.<RuntimeException>throwException(ex);
+  }
+
+  @SuppressWarnings("unchecked")
+  private static <E extends Throwable> void throwException(Throwable ex)
+      throws E {
+    throw (E) ex;
+  }
+
+  @SuppressWarnings("unchecked")
+  private static void validateResponse(HttpURLConnection conn, int expected)
+      throws IOException {
+    int status = conn.getResponseCode();
+    if (status != expected) {
+      InputStream es = null;
+      try {
+        es = conn.getErrorStream();
+        ObjectMapper mapper = new ObjectMapper();
+        Map json = mapper.readValue(es, Map.class);
+        String exClass = (String) json.get(
+            KMSRESTConstants.ERROR_EXCEPTION_JSON);
+        String exMsg = (String)
+            json.get(KMSRESTConstants.ERROR_MESSAGE_JSON);
+        Exception toThrow;
+        try {
+          ClassLoader cl = KMSClientProvider.class.getClassLoader();
+          Class klass = cl.loadClass(exClass);
+          Constructor constr = klass.getConstructor(String.class);
+          toThrow = (Exception) constr.newInstance(exMsg);
+        } catch (Exception ex) {
+          toThrow = new IOException(MessageFormat.format(
+              "HTTP status [{0}], {1}", status, conn.getResponseMessage()));
+        }
+        throwEx(toThrow);
+      } finally {
+        if (es != null) {
+          es.close();
+        }
+      }
+    }
+  }
+
+  private static <T> T call(HttpURLConnection conn, Map jsonOutput,
+      int expectedResponse, Class<T> klass)
+      throws IOException {
+    T ret = null;
+    try {
+      if (jsonOutput != null) {
+        writeJson(jsonOutput, conn.getOutputStream());
+      }
+    } catch (IOException ex) {
+      conn.getInputStream().close();
+      throw ex;
+    }
+    validateResponse(conn, expectedResponse);
+    if (APPLICATION_JSON_MIME.equalsIgnoreCase(conn.getContentType())
+        && klass != null) {
+      ObjectMapper mapper = new ObjectMapper();
+      InputStream is = null;
+      try {
+        is = conn.getInputStream();
+        ret = mapper.readValue(is, klass);
+      } catch (IOException ex) {
+        if (is != null) {
+          is.close();
+        }
+        throw ex;
+      } finally {
+        if (is != null) {
+          is.close();
+        }
+      }
+    }
+    return ret;
+  }
+
+  public static class KMSKeyVersion extends KeyVersion {
+    public KMSKeyVersion(String versionName, byte[] material) {
+      super(versionName, material);
+    }
+  }
+
+  @Override
+  public KeyVersion getKeyVersion(String versionName) throws IOException {
+    checkNotEmpty(versionName, "versionName");
+    URL url = createURL(KMSRESTConstants.KEY_VERSION_RESOURCE,
+        versionName, null, null);
+    HttpURLConnection conn = createConnection(url, HTTP_GET);
+    Map response = call(conn, null, HttpURLConnection.HTTP_OK, Map.class);
+    return parseJSONKeyVersion(response);
+  }
+
+  @Override
+  public KeyVersion getCurrentKey(String name) throws IOException {
+    checkNotEmpty(name, "name");
+    URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name,
+        KMSRESTConstants.CURRENT_VERSION_SUB_RESOURCE, null);
+    HttpURLConnection conn = createConnection(url, HTTP_GET);
+    Map response = call(conn, null, HttpURLConnection.HTTP_OK, Map.class);
+    return parseJSONKeyVersion(response);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public List<String> getKeys() throws IOException {
+    URL url = createURL(KMSRESTConstants.KEYS_NAMES_RESOURCE, null, null,
+        null);
+    HttpURLConnection conn = createConnection(url, HTTP_GET);
+    List response = call(conn, null, HttpURLConnection.HTTP_OK, List.class);
+    return (List<String>) response;
+  }
+
+  public static class KMSMetadata extends Metadata {
+    public KMSMetadata(String cipher, int bitLength, String description,
+        Date created, int versions) {
+      super(cipher, bitLength, description, created, versions);
+    }
+  }
+
+  // breaking keyNames into sets to keep resulting URL undler 2000 chars
+  private List<String[]> createKeySets(String[] keyNames) {
+    List<String[]> list = new ArrayList<String[]>();
+    List<String> batch = new ArrayList<String>();
+    int batchLen = 0;
+    for (String name : keyNames) {
+      int additionalLen = KMSRESTConstants.KEY_OP.length() + 1 + name.length();
+      batchLen += additionalLen;
+      // topping at 1500 to account for initial URL and encoded names
+      if (batchLen > 1500) {
+        list.add(batch.toArray(new String[batch.size()]));
+        batch = new ArrayList<String>();
+        batchLen = additionalLen;
+      }
+      batch.add(name);
+    }
+    if (!batch.isEmpty()) {
+      list.add(batch.toArray(new String[batch.size()]));
+    }
+    return list;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public Metadata[] getKeysMetadata(String ... keyNames) throws IOException {
+    List<Metadata> keysMetadata = new ArrayList<Metadata>();
+    List<String[]> keySets = createKeySets(keyNames);
+    for (String[] keySet : keySets) {
+      if (keyNames.length > 0) {
+        Map<String, Object> queryStr = new HashMap<String, Object>();
+        queryStr.put(KMSRESTConstants.KEY_OP, keySet);
+        URL url = createURL(KMSRESTConstants.KEYS_METADATA_RESOURCE, null,
+            null, queryStr);
+        HttpURLConnection conn = createConnection(url, HTTP_GET);
+        List<Map> list = call(conn, null, HttpURLConnection.HTTP_OK, List.class);
+        for (Map map : list) {
+          keysMetadata.add(parseJSONMetadata(map));
+        }
+      }
+    }
+    return keysMetadata.toArray(new Metadata[keysMetadata.size()]);
+  }
+
+  private KeyVersion createKeyInternal(String name, byte[] material,
+      Options options)
+      throws NoSuchAlgorithmException, IOException {
+    checkNotEmpty(name, "name");
+    checkNotNull(options, "options");
+    Map<String, Object> jsonKey = new HashMap<String, Object>();
+    jsonKey.put(KMSRESTConstants.NAME_FIELD, name);
+    jsonKey.put(KMSRESTConstants.CIPHER_FIELD, options.getCipher());
+    jsonKey.put(KMSRESTConstants.LENGTH_FIELD, options.getBitLength());
+    if (material != null) {
+      jsonKey.put(KMSRESTConstants.MATERIAL_FIELD,
+          Base64.encodeBase64String(material));
+    }
+    if (options.getDescription() != null) {
+      jsonKey.put(KMSRESTConstants.DESCRIPTION_FIELD,
+          options.getDescription());
+    }
+    URL url = createURL(KMSRESTConstants.KEYS_RESOURCE, null, null, null);
+    HttpURLConnection conn = createConnection(url, HTTP_POST);
+    conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
+    Map response = call(conn, jsonKey, HttpURLConnection.HTTP_CREATED,
+        Map.class);
+    return parseJSONKeyVersion(response);
+  }
+
+  @Override
+  public KeyVersion createKey(String name, Options options)
+      throws NoSuchAlgorithmException, IOException {
+    return createKeyInternal(name, null, options);
+  }
+
+  @Override
+  public KeyVersion createKey(String name, byte[] material, Options options)
+      throws IOException {
+    checkNotNull(material, "material");
+    try {
+      return createKeyInternal(name, material, options);
+    } catch (NoSuchAlgorithmException ex) {
+      throw new RuntimeException("It should not happen", ex);
+    }
+  }
+
+  private KeyVersion rollNewVersionInternal(String name, byte[] material)
+      throws NoSuchAlgorithmException, IOException {
+    checkNotEmpty(name, "name");
+    Map<String, String> jsonMaterial = new HashMap<String, String>();
+    if (material != null) {
+      jsonMaterial.put(KMSRESTConstants.MATERIAL_FIELD,
+          Base64.encodeBase64String(material));
+    }
+    URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name, null, null);
+    HttpURLConnection conn = createConnection(url, HTTP_POST);
+    conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
+    Map response = call(conn, jsonMaterial,
+        HttpURLConnection.HTTP_OK, Map.class);
+    return parseJSONKeyVersion(response);
+  }
+
+
+  @Override
+  public KeyVersion rollNewVersion(String name)
+      throws NoSuchAlgorithmException, IOException {
+    return rollNewVersionInternal(name, null);
+  }
+
+  @Override
+  public KeyVersion rollNewVersion(String name, byte[] material)
+      throws IOException {
+    checkNotNull(material, "material");
+    try {
+      return rollNewVersionInternal(name, material);
+    } catch (NoSuchAlgorithmException ex) {
+      throw new RuntimeException("It should not happen", ex);
+    }
+  }
+
+  @Override
+  public List<KeyVersion> getKeyVersions(String name) throws IOException {
+    checkNotEmpty(name, "name");
+    URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name,
+        KMSRESTConstants.VERSIONS_SUB_RESOURCE, null);
+    HttpURLConnection conn = createConnection(url, HTTP_GET);
+    List response = call(conn, null, HttpURLConnection.HTTP_OK, List.class);
+    List<KeyVersion> versions = null;
+    if (!response.isEmpty()) {
+      versions = new ArrayList<KeyVersion>();
+      for (Object obj : response) {
+        versions.add(parseJSONKeyVersion((Map) obj));
+      }
+    }
+    return versions;
+  }
+
+  @Override
+  public Metadata getMetadata(String name) throws IOException {
+    checkNotEmpty(name, "name");
+    URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name,
+        KMSRESTConstants.METADATA_SUB_RESOURCE, null);
+    HttpURLConnection conn = createConnection(url, HTTP_GET);
+    Map response = call(conn, null, HttpURLConnection.HTTP_OK, Map.class);
+    return parseJSONMetadata(response);
+  }
+
+  @Override
+  public void deleteKey(String name) throws IOException {
+    checkNotEmpty(name, "name");
+    URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name, null, null);
+    HttpURLConnection conn = createConnection(url, HTTP_DELETE);
+    call(conn, null, HttpURLConnection.HTTP_OK, null);
+  }
+
+  @Override
+  public void flush() throws IOException {
+    // NOP
+    // the client does not keep any local state, thus flushing is not required
+    // because of the client.
+    // the server should not keep in memory state on behalf of clients either.
+  }
+
+  @VisibleForTesting
+  public static String buildVersionName(String name, int version) {
+    return KeyProvider.buildVersionName(name, version);
+  }
+
+}
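 
The new client talks to the KMS REST API over HTTP(S) and is registered as a KeyProviderFactory service (see the META-INF/services entry in the file list), so kms: URIs should resolve to it. A hedged sketch of using it directly; the host and port are placeholders:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.kms.KMSClientProvider;

    public class KmsClientSketch {
      public static void main(String[] args) throws Exception {
        // kms://http@<host>:<port>/kms unnests to the REST endpoint
        // http://<host>:<port>/kms/v1/...; host and port are illustrative.
        URI uri = new URI("kms://http@kms.example.com:16000/kms");
        KeyProvider provider = new KMSClientProvider(uri, new Configuration());
        // GET /v1/keys/names, then GET /v1/key/<name>/_metadata for each key.
        for (String name : provider.getKeys()) {
          System.out.println(name + " : " + provider.getMetadata(name));
        }
      }
    }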

+ 53 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java

@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * KMS REST and JSON constants and utility methods for the KMSServer.
+ */
+@InterfaceAudience.Private
+public class KMSRESTConstants {
+
+  public static final String SERVICE_VERSION = "/v1";
+  public static final String KEY_RESOURCE = "key";
+  public static final String KEYS_RESOURCE = "keys";
+  public static final String KEYS_METADATA_RESOURCE = KEYS_RESOURCE +
+      "/metadata";
+  public static final String KEYS_NAMES_RESOURCE = KEYS_RESOURCE + "/names";
+  public static final String KEY_VERSION_RESOURCE = "keyversion";
+  public static final String METADATA_SUB_RESOURCE = "_metadata";
+  public static final String VERSIONS_SUB_RESOURCE = "_versions";
+  public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion";
+
+  public static final String KEY_OP = "key";
+
+  public static final String NAME_FIELD = "name";
+  public static final String CIPHER_FIELD = "cipher";
+  public static final String LENGTH_FIELD = "length";
+  public static final String DESCRIPTION_FIELD = "description";
+  public static final String CREATED_FIELD = "created";
+  public static final String VERSIONS_FIELD = "versions";
+  public static final String MATERIAL_FIELD = "material";
+  public static final String VERSION_NAME_FIELD = "versionName";
+
+  public static final String ERROR_EXCEPTION_JSON = "exception";
+  public static final String ERROR_MESSAGE_JSON = "message";
+
+}

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -139,6 +139,9 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String 
   HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_USER_MAPPINGS =
       "security.refresh.user.mappings.protocol.acl";
+  public static final String
+  HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_CALLQUEUE =
+      "security.refresh.callqueue.protocol.acl";
   public static final String 
   SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
   public static final String 

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -78,6 +78,8 @@ public class CommonConfigurationKeysPublic {
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY =
     "net.topology.table.file.name";
+  public static final String NET_DEPENDENCY_SCRIPT_FILE_NAME_KEY = 
+    "net.topology.dependency.script.file.name";
 
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  FS_TRASH_CHECKPOINT_INTERVAL_KEY =

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathIOException.java

@@ -40,7 +40,7 @@ public class PathIOException extends IOException {
    *  @param path for the exception
    */
   public PathIOException(String path) {
-    this(path, EIO, null);
+    this(path, EIO);
   }
 
   /**
@@ -59,7 +59,8 @@ public class PathIOException extends IOException {
    * @param error custom string to use an the error text
    */
   public PathIOException(String path, String error) {
-    this(path, error, null);
+    super(error);
+    this.path = path;
   }
 
   protected PathIOException(String path, String error, Throwable cause) {

+ 10 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java

@@ -63,6 +63,8 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
   private boolean multipartEnabled;
   private long multipartCopyBlockSize;
   static final long MAX_PART_SIZE = (long)5 * 1024 * 1024 * 1024;
+
+  private String serverSideEncryptionAlgorithm;
   
   public static final Log LOG =
       LogFactory.getLog(Jets3tNativeFileSystemStore.class);
@@ -87,6 +89,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
     multipartCopyBlockSize = Math.min(
         conf.getLong("fs.s3n.multipart.copy.block.size", MAX_PART_SIZE),
         MAX_PART_SIZE);
+    serverSideEncryptionAlgorithm = conf.get("fs.s3n.server-side-encryption-algorithm");
 
     bucket = new S3Bucket(uri.getHost());
   }
@@ -107,6 +110,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
       object.setDataInputStream(in);
       object.setContentType("binary/octet-stream");
       object.setContentLength(file.length());
+      object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
       if (md5Hash != null) {
         object.setMd5Hash(md5Hash);
       }
@@ -130,6 +134,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
     object.setDataInputFile(file);
     object.setContentType("binary/octet-stream");
     object.setContentLength(file.length());
+    object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
     if (md5Hash != null) {
       object.setMd5Hash(md5Hash);
     }
@@ -156,6 +161,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
       object.setDataInputStream(new ByteArrayInputStream(new byte[0]));
       object.setContentType("binary/octet-stream");
       object.setContentLength(0);
+      object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
       s3Service.putObject(bucket, object);
     } catch (S3ServiceException e) {
       handleS3ServiceException(e);
@@ -317,8 +323,11 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
           return;
         }
       }
+
+      S3Object dstObject = new S3Object(dstKey);
+      dstObject.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
       s3Service.copyObject(bucket.getName(), srcKey, bucket.getName(),
-          new S3Object(dstKey), false);
+          dstObject, false);
     } catch (ServiceException e) {
       handleServiceException(srcKey, e);
     }

+ 56 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMappingWithDependency.java

@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * An interface to be implemented by pluggable DNS-name/IP-address to RackID
+ * resolvers that can also report dependencies between nodes.
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+
+public interface DNSToSwitchMappingWithDependency extends DNSToSwitchMapping {
+  /**
+   * Get a list of dependent DNS-names for a given DNS-name/IP-address.
+   * Dependent DNS-names fall into the same fault domain which must be
+   * taken into account when placing replicas. This is intended to be used for
+   * cross node group dependencies when node groups are not sufficient to 
+   * distinguish data nodes by fault domains. In practice, this is needed when
+ * a compute server runs VMs which use shared storage (as opposed to
+ * directly attached storage). In this case data nodes fall into two different
+ * fault domains. One fault domain is defined by a compute server and
+ * the other is defined by storage. With node groups we can group data nodes
+ * by either fault domain, but only one of them can be captured, so the other
+ * must be handled through cross node group dependencies. These dependencies
+ * are applied in block placement policies, which ensure that no two replicas
+ * are placed on two dependent nodes.
+ * @param name - host name or IP address of a data node. The input host name
+ * must be the value of the dfs.datanode.hostname config property if that
+ * property is set; otherwise the FQDN of the data node is used.
+ * @return list of dependent host names. If the dfs.datanode.hostname config
+ * property is set, its value must be returned for each dependent node;
+ * otherwise the FQDN of each dependent node is returned.
+   */
+  public List<String> getDependency(String name);
+}

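To make the getDependency contract concrete, here is a small hypothetical helper (not part of this change) that treats two nodes as sharing a fault domain when one appears in the other's dependency list, which is exactly the condition a block placement policy would have to avoid:

----
import java.util.List;

import org.apache.hadoop.net.DNSToSwitchMappingWithDependency;

public final class DependencyCheck {
  // Returns true if nodeB falls into the same fault domain as nodeA
  // according to the supplied mapping. A null dependency list (error case)
  // is treated as "no known dependency".
  public static boolean areDependent(DNSToSwitchMappingWithDependency mapping,
                                     String nodeA, String nodeB) {
    List<String> deps = mapping.getDependency(nodeA);
    return deps != null && deps.contains(nodeB);
  }
}
----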
+ 16 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java

@@ -45,7 +45,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
+public class ScriptBasedMapping extends CachedDNSToSwitchMapping {
 
   /**
    * Minimum number of arguments: {@value}
@@ -63,6 +63,7 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
    */
   static final String SCRIPT_FILENAME_KEY = 
                      CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY ;
+
   /**
    * key to the argument count that the script supports
    * {@value}
@@ -84,7 +85,15 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
    *
    */
   public ScriptBasedMapping() {
-    super(new RawScriptBasedMapping());
+    this(new RawScriptBasedMapping());
+  }
+
+  /**
+   * Create an instance from the given raw mapping
+   * @param rawMap raw DNSToSwitchMapping
+   */
+  public ScriptBasedMapping(DNSToSwitchMapping rawMap) {
+    super(rawMap);
   }
 
   /**
@@ -132,7 +141,7 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
    * This is the uncached script mapping that is fed into the cache managed
    * by the superclass {@link CachedDNSToSwitchMapping}
    */
-  private static final class RawScriptBasedMapping
+  protected static class RawScriptBasedMapping
       extends AbstractDNSToSwitchMapping {
     private String scriptName;
     private int maxArgs; //max hostnames per call of the script
@@ -176,7 +185,7 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
         return m;
       }
 
-      String output = runResolveCommand(names);
+      String output = runResolveCommand(names, scriptName);
       if (output != null) {
         StringTokenizer allSwitchInfo = new StringTokenizer(output);
         while (allSwitchInfo.hasMoreTokens()) {
@@ -208,7 +217,8 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
      * @return null if the number of arguments is out of range,
      * or the output of the command.
      */
-    private String runResolveCommand(List<String> args) {
+    protected String runResolveCommand(List<String> args, 
+        String commandScriptName) {
       int loopCount = 0;
       if (args.size() == 0) {
         return null;
@@ -225,7 +235,7 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
       while (numProcessed != args.size()) {
         int start = maxArgs * loopCount;
         List<String> cmdList = new ArrayList<String>();
-        cmdList.add(scriptName);
+        cmdList.add(commandScriptName);
         for (numProcessed = start; numProcessed < (start + maxArgs) &&
             numProcessed < args.size(); numProcessed++) {
           cmdList.add(args.get(numProcessed));

+ 178 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMappingWithDependency.java

@@ -0,0 +1,178 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.net;
+
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
+
+/**
+ * This class extends ScriptBasedMapping class and implements 
+ * the {@link DNSToSwitchMappingWithDependency} interface using 
+ * a script configured via the 
+ * {@link CommonConfigurationKeys#NET_DEPENDENCY_SCRIPT_FILE_NAME_KEY} option.
+ * <p/>
+ * It contains a static class <code>RawScriptBasedMappingWithDependency</code>
+ * that performs the getDependency work.
+ * <p/>
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class ScriptBasedMappingWithDependency  extends ScriptBasedMapping 
+    implements DNSToSwitchMappingWithDependency {
+  /**
+   * key to the dependency script filename {@value}
+   */
+  static final String DEPENDENCY_SCRIPT_FILENAME_KEY =
+      CommonConfigurationKeys.NET_DEPENDENCY_SCRIPT_FILE_NAME_KEY;
+
+  private Map<String, List<String>> dependencyCache = 
+      new ConcurrentHashMap<String, List<String>>();
+
+  /**
+   * Create an instance with the default configuration.
+   * <p/>
+   * Calling {@link #setConf(Configuration)} will trigger a
+   * re-evaluation of the configuration settings and so be used to
+   * set up the mapping script.
+   */
+  public ScriptBasedMappingWithDependency() {
+    super(new RawScriptBasedMappingWithDependency());
+  }
+
+  /**
+   * Get the cached mapping and convert it to its real type
+   * @return the inner raw script mapping.
+   */
+  private RawScriptBasedMappingWithDependency getRawMapping() {
+    return (RawScriptBasedMappingWithDependency)rawMapping;
+  }
+
+  @Override
+  public String toString() {
+    return "script-based mapping with " + getRawMapping().toString();
+  }
+
+  /**
+   * {@inheritDoc}
+   * <p/>
+   * This will get called in the superclass constructor, so a check is needed
+   * to ensure that the raw mapping is defined before trying to relay a null
+   * configuration.
+   * @param conf the configuration to pass down to the raw mapping
+   */
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    getRawMapping().setConf(conf);
+  }
+
+  /**
+   * Get dependencies in the topology for a given host
+   * @param name - host name for which we are getting dependency
+   * @return a list of hosts dependent on the provided host name
+   */
+  @Override
+  public List<String> getDependency(String name) {
+    //normalize all input names to be in the form of IP addresses
+    name = NetUtils.normalizeHostName(name);
+
+    if (name==null) {
+      return Collections.emptyList();
+    }
+
+    List<String> dependencies = dependencyCache.get(name);
+    if (dependencies == null) {
+      //not cached
+      dependencies = getRawMapping().getDependency(name);
+      if(dependencies != null) {
+        dependencyCache.put(name, dependencies);
+      }
+    }
+
+    return dependencies;
+  }
+
+  /**
+   * This is the uncached script mapping that is fed into the cache managed
+   * by the superclass {@link CachedDNSToSwitchMapping}
+   */
+  private static final class RawScriptBasedMappingWithDependency
+      extends ScriptBasedMapping.RawScriptBasedMapping 
+      implements DNSToSwitchMappingWithDependency {
+    private String dependencyScriptName;
+
+    /**
+     * Set the configuration and extract the configuration parameters of interest
+     * @param conf the new configuration
+     */
+    @Override
+    public void setConf (Configuration conf) {
+      super.setConf(conf);
+      if (conf != null) {
+        dependencyScriptName = conf.get(DEPENDENCY_SCRIPT_FILENAME_KEY);
+      } else {
+        dependencyScriptName = null;
+      }
+    }
+
+    /**
+     * Constructor. The mapping is not ready to use until
+     * {@link #setConf(Configuration)} has been called
+     */
+    public RawScriptBasedMappingWithDependency() {}
+
+    @Override
+    public List<String> getDependency(String name) {
+      if (name==null || dependencyScriptName==null) {
+        return Collections.emptyList();
+      }
+
+      List <String> m = new LinkedList<String>();
+      List <String> args = new ArrayList<String>(1);
+      args.add(name);
+  
+      String output = runResolveCommand(args, dependencyScriptName);
+      if (output != null) {
+        StringTokenizer allSwitchInfo = new StringTokenizer(output);
+        while (allSwitchInfo.hasMoreTokens()) {
+          String switchInfo = allSwitchInfo.nextToken();
+          m.add(switchInfo);
+        }
+      } else {
+        // an error occurred. return null to signify this.
+        // (exn was already logged in runResolveCommand)
+        return null;
+      }
+
+      return m;
+    }
+
+    @Override
+    public String toString() {
+      return super.toString() + ", " + (dependencyScriptName != null ?
+          ("dependency script " + dependencyScriptName) : NO_SCRIPT);
+    }
+  }
+}

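A hedged usage sketch, mirroring the pattern exercised by the new unit test further down: both script paths are placeholders and the scripts are assumed to exist on the node, with the topology script mapping hosts to racks and the dependency script listing dependent hosts.

----
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.net.ScriptBasedMappingWithDependency;

public final class DependencyMappingSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
        "/etc/hadoop/topology.sh");                    // placeholder path
    conf.set(CommonConfigurationKeys.NET_DEPENDENCY_SCRIPT_FILE_NAME_KEY,
        "/etc/hadoop/topology-dependency.sh");         // placeholder path

    ScriptBasedMappingWithDependency mapping =
        new ScriptBasedMappingWithDependency();
    mapping.setConf(conf);

    // Rack resolution comes from the ScriptBasedMapping superclass;
    // dependencies come from the new dependency script (cached per host).
    List<String> racks = mapping.resolve(Arrays.asList("dn1.example.com"));
    List<String> deps = mapping.getDependency("dn1.example.com");
    System.out.println("racks=" + racks + ", dependencies=" + deps);
  }
}
----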
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPropertiesResolver.java

@@ -80,7 +80,7 @@ public class SaslPropertiesResolver implements Configurable{
    * The default Sasl Properties read from the configuration
    * @return sasl Properties
    */
-  protected Map<String,String> getDefaultProperties() {
+  public Map<String,String> getDefaultProperties() {
     return properties;
   }
 

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -37,6 +37,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -1464,7 +1465,8 @@ public class UserGroupInformation {
   public synchronized String[] getGroupNames() {
     ensureInitialized();
     try {
-      List<String> result = groups.getGroups(getShortUserName());
+      Set<String> result = new LinkedHashSet<String>
+        (groups.getGroups(getShortUserName()));
       return result.toArray(new String[result.size()]);
     } catch (IOException ie) {
       LOG.warn("No groups available for user " + getShortUserName());

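The switch to LinkedHashSet de-duplicates group names while keeping the order in which the group mapping returned them. A minimal standalone sketch of the same idiom, with made-up group names:

----
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public final class DedupGroups {
  public static void main(String[] args) {
    // A duplicate can appear when more than one mapping source reports a group.
    List<String> groups = Arrays.asList("hadoop", "users", "hadoop");
    Set<String> unique = new LinkedHashSet<String>(groups);
    System.out.println(unique); // prints [hadoop, users]
  }
}
----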
+ 64 - 24
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java

@@ -40,13 +40,16 @@ import com.google.common.annotations.VisibleForTesting;
 public class ProxyUsers {
 
   private static final String CONF_HOSTS = ".hosts";
-  public static final String CONF_GROUPS = ".groups";
-  public static final String CONF_HADOOP_PROXYUSER = "hadoop.proxyuser.";
-  public static final String CONF_HADOOP_PROXYUSER_RE = "hadoop\\.proxyuser\\.";
+  private static final String CONF_USERS = ".users";
+  private static final String CONF_GROUPS = ".groups";
+  private static final String CONF_HADOOP_PROXYUSER = "hadoop.proxyuser.";
+  private static final String CONF_HADOOP_PROXYUSER_RE = "hadoop\\.proxyuser\\.";
   public static final String CONF_HADOOP_PROXYSERVERS = "hadoop.proxyservers";
   
   private static boolean init = false;
-  // list of groups and hosts per proxyuser
+  //list of users, groups and hosts per proxyuser
+  private static Map<String, Collection<String>> proxyUsers =
+    new HashMap<String, Collection<String>>();
   private static Map<String, Collection<String>> proxyGroups = 
     new HashMap<String, Collection<String>>();
   private static Map<String, Collection<String>> proxyHosts = 
@@ -55,7 +58,7 @@ public class ProxyUsers {
     new HashSet<String>();
 
   /**
-   * reread the conf and get new values for "hadoop.proxyuser.*.groups/hosts"
+   * reread the conf and get new values for "hadoop.proxyuser.*.groups/users/hosts"
    */
   public static void refreshSuperUserGroupsConfiguration() {
     //load server side configuration;
@@ -71,11 +74,20 @@ public class ProxyUsers {
     // remove all existing stuff
     proxyGroups.clear();
     proxyHosts.clear();
+    proxyUsers.clear();
     proxyServers.clear();
+    
+    // get all the new keys for users
+    String regex = CONF_HADOOP_PROXYUSER_RE+"[^.]*\\"+CONF_USERS;
+    Map<String,String> allMatchKeys = conf.getValByRegex(regex);
+    for(Entry<String, String> entry : allMatchKeys.entrySet()) {
+      Collection<String> users = StringUtils.getTrimmedStringCollection(entry.getValue());
+      proxyUsers.put(entry.getKey(), users);
+    }
 
     // get all the new keys for groups
-    String regex = CONF_HADOOP_PROXYUSER_RE+"[^.]*\\"+CONF_GROUPS;
-    Map<String,String> allMatchKeys = conf.getValByRegex(regex);
+    regex = CONF_HADOOP_PROXYUSER_RE+"[^.]*\\"+CONF_GROUPS;
+    allMatchKeys = conf.getValByRegex(regex);
     for(Entry<String, String> entry : allMatchKeys.entrySet()) {
       Collection<String> groups = StringUtils.getTrimmedStringCollection(entry.getValue());
       proxyGroups.put(entry.getKey(), groups );
@@ -108,7 +120,17 @@ public class ProxyUsers {
     }
     return proxyServers.contains(remoteAddr);
   }
-
+  
+  /**
+   * Returns configuration key for effective users allowed for a superuser
+   * 
+   * @param userName name of the superuser
+   * @return configuration key for superuser users
+   */
+  public static String getProxySuperuserUserConfKey(String userName) {
+    return ProxyUsers.CONF_HADOOP_PROXYUSER+userName+ProxyUsers.CONF_USERS;
+  }
+  
   /**
    * Returns configuration key for effective user groups allowed for a superuser
    * 
@@ -146,27 +168,40 @@ public class ProxyUsers {
     if (user.getRealUser() == null) {
       return;
     }
-    boolean groupAuthorized = false;
+    boolean userAuthorized = false;
     boolean ipAuthorized = false;
     UserGroupInformation superUser = user.getRealUser();
-
-    Collection<String> allowedUserGroups = proxyGroups.get(
-        getProxySuperuserGroupConfKey(superUser.getShortUserName()));
     
-    if (isWildcardList(allowedUserGroups)) {
-      groupAuthorized = true;
-    } else if (allowedUserGroups != null && !allowedUserGroups.isEmpty()) {
-      for (String group : user.getGroupNames()) {
-        if (allowedUserGroups.contains(group)) {
-          groupAuthorized = true;
-          break;
-        }
+    Collection<String> allowedUsers = proxyUsers.get(
+        getProxySuperuserUserConfKey(superUser.getShortUserName()));
+
+    if (isWildcardList(allowedUsers)) {
+      userAuthorized = true;
+    } else if (allowedUsers != null && !allowedUsers.isEmpty()) {
+      if (allowedUsers.contains(user.getShortUserName())) {
+        userAuthorized = true;
       }
     }
 
-    if (!groupAuthorized) {
-      throw new AuthorizationException("User: " + superUser.getUserName()
-          + " is not allowed to impersonate " + user.getUserName());
+    if (!userAuthorized) {
+      Collection<String> allowedUserGroups = proxyGroups.get(
+          getProxySuperuserGroupConfKey(superUser.getShortUserName()));
+      
+      if (isWildcardList(allowedUserGroups)) {
+        userAuthorized = true;
+      } else if (allowedUserGroups != null && !allowedUserGroups.isEmpty()) {
+        for (String group : user.getGroupNames()) {
+          if (allowedUserGroups.contains(group)) {
+            userAuthorized = true;
+            break;
+          }
+        }
+      }
+
+      if (!userAuthorized) {
+        throw new AuthorizationException("User: " + superUser.getUserName()
+            + " is not allowed to impersonate " + user.getUserName());
+      }
     }
     
     Collection<String> ipList = proxyHosts.get(
@@ -188,7 +223,7 @@ public class ProxyUsers {
         }
       }
     }
-    if(!ipAuthorized) {
+    if (!ipAuthorized) {
       throw new AuthorizationException("Unauthorized connection for super-user: "
           + superUser.getUserName() + " from IP " + remoteAddress);
     }
@@ -217,6 +252,11 @@ public class ProxyUsers {
       (list.size() == 1) &&
       (list.contains("*"));
   }
+   
+  @VisibleForTesting
+  public static Map<String, Collection<String>> getProxyUsers() {
+    return proxyUsers;
+  }
 
   @VisibleForTesting
   public static Map<String, Collection<String>> getProxyGroups() {

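A hedged sketch of the new user-based impersonation check, using the same calls the updated TestProxyUsers exercises later in this commit; the superuser name, proxied user and IP address are illustrative values only.

----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;

public final class ProxyUserSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "oozie" may impersonate only user1 and user2, and only from 10.0.0.1.
    conf.set(ProxyUsers.getProxySuperuserUserConfKey("oozie"), "user1,user2");
    conf.set(ProxyUsers.getProxySuperuserIpConfKey("oozie"), "10.0.0.1");
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

    UserGroupInformation superUser =
        UserGroupInformation.createRemoteUser("oozie");
    UserGroupInformation proxyUgi =
        UserGroupInformation.createProxyUser("user1", superUser);
    try {
      ProxyUsers.authorize(proxyUgi, "10.0.0.1");
      System.out.println("impersonation allowed");
    } catch (AuthorizationException e) {
      System.out.println("impersonation denied: " + e.getMessage());
    }
  }
}
----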
+ 15 - 23
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java

@@ -209,8 +209,7 @@ extends AbstractDelegationTokenIdentifier>
       currentTokens.put(identifier, new DelegationTokenInformation(renewDate,
           password, getTrackingIdIfEnabled(identifier)));
     } else {
-      throw new IOException(
-          "Same delegation token being added twice.");
+      throw new IOException("Same delegation token being added twice.");
     }
   }
 
@@ -355,27 +354,24 @@ extends AbstractDelegationTokenIdentifier>
    */
   public synchronized long renewToken(Token<TokenIdent> token,
                          String renewer) throws InvalidToken, IOException {
-    long now = Time.now();
     ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
     DataInputStream in = new DataInputStream(buf);
     TokenIdent id = createIdentifier();
     id.readFields(in);
-    LOG.info("Token renewal requested for identifier: "+id);
-    
+    LOG.info("Token renewal for identifier: " + id + "; total currentTokens "
+        +  currentTokens.size());
+
+    long now = Time.now();
     if (id.getMaxDate() < now) {
-      throw new InvalidToken("User " + renewer + 
-                             " tried to renew an expired token");
+      throw new InvalidToken(renewer + " tried to renew an expired token");
     }
     if ((id.getRenewer() == null) || (id.getRenewer().toString().isEmpty())) {
-      throw new AccessControlException("User " + renewer + 
-                                       " tried to renew a token without " +
-                                       "a renewer");
+      throw new AccessControlException(renewer +
+          " tried to renew a token without a renewer");
     }
     if (!id.getRenewer().toString().equals(renewer)) {
-      throw new AccessControlException("Client " + renewer + 
-                                       " tries to renew a token with " +
-                                       "renewer specified as " + 
-                                       id.getRenewer());
+      throw new AccessControlException(renewer +
+          " tries to renew a token with renewer " + id.getRenewer());
     }
     DelegationKey key = allKeys.get(id.getMasterKeyId());
     if (key == null) {
@@ -386,8 +382,8 @@ extends AbstractDelegationTokenIdentifier>
     }
     byte[] password = createPassword(token.getIdentifier(), key.getKey());
     if (!Arrays.equals(password, token.getPassword())) {
-      throw new AccessControlException("Client " + renewer
-          + " is trying to renew a token with " + "wrong password");
+      throw new AccessControlException(renewer +
+          " is trying to renew a token with wrong password");
     }
     long renewTime = Math.min(id.getMaxDate(), now + tokenRenewInterval);
     String trackingId = getTrackingIdIfEnabled(id);
@@ -429,8 +425,7 @@ extends AbstractDelegationTokenIdentifier>
       throw new AccessControlException(canceller
           + " is not authorized to cancel the token");
     }
-    DelegationTokenInformation info = null;
-    info = currentTokens.remove(id);
+    DelegationTokenInformation info = currentTokens.remove(id);
     if (info == null) {
       throw new InvalidToken("Token not found");
     }
@@ -554,14 +549,11 @@ extends AbstractDelegationTokenIdentifier>
           try {
             Thread.sleep(Math.min(5000, keyUpdateInterval)); // 5 seconds
           } catch (InterruptedException ie) {
-            LOG
-            .error("InterruptedExcpetion recieved for ExpiredTokenRemover thread "
-                + ie);
+            LOG.error("ExpiredTokenRemover received " + ie);
           }
         }
       } catch (Throwable t) {
-        LOG.error("ExpiredTokenRemover thread received unexpected exception. "
-            + t);
+        LOG.error("ExpiredTokenRemover thread received unexpected exception", t);
         Runtime.getRuntime().exit(-1);
       }
     }

+ 1 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java

@@ -141,8 +141,7 @@ public class CompositeService extends AbstractService {
    * @throws RuntimeException the first exception raised during the
    * stop process -<i>after all services are stopped</i>
    */
-  private synchronized void stop(int numOfServicesStarted,
-                                 boolean stopOnlyStartedServices) {
+  private void stop(int numOfServicesStarted, boolean stopOnlyStartedServices) {
     // stop in reverse order of start
     Exception firstException = null;
     List<Service> services = getServices();

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.crypto.key.KeyProviderFactory

@@ -15,3 +15,4 @@
 
 org.apache.hadoop.crypto.key.JavaKeyStoreProvider$Factory
 org.apache.hadoop.crypto.key.UserProvider$Factory
+org.apache.hadoop.crypto.key.kms.KMSClientProvider$Factory

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -575,6 +575,14 @@
   </description>
 </property>
 
+<property>
+  <name>fs.s3n.server-side-encryption-algorithm</name>
+  <value></value>
+  <description>Specify a server-side encryption algorithm for S3.
+  The default is NULL, and the only other currently allowable value is AES256.
+  </description>
+</property>
+
 <property>
   <name>io.seqfile.compress.blocksize</name>
   <value>1000000</value>

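A core-site.xml override enabling the only non-null value the description above allows; this is the property read by Jets3tNativeFileSystemStore earlier in this change:

----
<property>
  <name>fs.s3n.server-side-encryption-algorithm</name>
  <value>AES256</value>
</property>
----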
+ 15 - 5
hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm

@@ -226,24 +226,34 @@ Hadoop MapReduce Next Generation - Cluster Setup
 *-------------------------+-------------------------+------------------------+
 | <<<yarn.resourcemanager.address>>> | | |
 | | <<<ResourceManager>>> host:port for clients to submit jobs. | |
-| | | <host:port> |
+| | | <host:port>\ |
+| | | If set, overrides the hostname set in <<<yarn.resourcemanager.hostname>>>. |
 *-------------------------+-------------------------+------------------------+
 | <<<yarn.resourcemanager.scheduler.address>>> | | |
 | | <<<ResourceManager>>> host:port for ApplicationMasters to talk to | |
 | | Scheduler to obtain resources. | |
-| | | <host:port> |
+| | | <host:port>\ |
+| | | If set, overrides the hostname set in <<<yarn.resourcemanager.hostname>>>. |
 *-------------------------+-------------------------+------------------------+
 | <<<yarn.resourcemanager.resource-tracker.address>>> | | |
 | | <<<ResourceManager>>> host:port for NodeManagers. | |
-| | | <host:port> |
+| | | <host:port>\ |
+| | | If set, overrides the hostname set in <<<yarn.resourcemanager.hostname>>>. |
 *-------------------------+-------------------------+------------------------+
 | <<<yarn.resourcemanager.admin.address>>> | | |
 | | <<<ResourceManager>>> host:port for administrative commands. | |
-| | | <host:port> |
+| | | <host:port>\ |
+| | | If set, overrides the hostname set in <<<yarn.resourcemanager.hostname>>>. |
 *-------------------------+-------------------------+------------------------+
 | <<<yarn.resourcemanager.webapp.address>>> | | |
 | | <<<ResourceManager>>> web-ui host:port. | |
-| | | <host:port> |
+| | | <host:port>\ |
+| | | If set, overrides the hostname set in <<<yarn.resourcemanager.hostname>>>. |
+*-------------------------+-------------------------+------------------------+
+| <<<yarn.resourcemanager.hostname>>> | | |
+| | <<<ResourceManager>>> host. | |
+| | | <host>\ |
+| | | Single hostname that can be set in place of setting all <<<yarn.resourcemanager*address>>> resources.  Results in default ports for ResourceManager components. |
 *-------------------------+-------------------------+------------------------+
 | <<<yarn.resourcemanager.scheduler.class>>> | | |
 | | <<<ResourceManager>>> Scheduler class. | |

+ 20 - 2
hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm

@@ -176,9 +176,11 @@ KVNO Timestamp         Principal
   the rule specified by <<<hadoop.security.auth_to_local>>>
   which works in the same way as the <<<auth_to_local>>> in
   {{{http://web.mit.edu/Kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html}Kerberos configuration file (krb5.conf)}}.
+  In addition, Hadoop <<<auth_to_local>>> mapping supports the <</L>> flag that
+  lowercases the returned name.
 
   By default, it picks the first component of principal name as a user name
-  if the realms matches to the <<<defalut_realm>>> (usually defined in /etc/krb5.conf).
+  if the realm matches the <<<default_realm>>> (usually defined in /etc/krb5.conf).
   For example, <<<host/full.qualified.domain.name@REALM.TLD>>> is mapped to <<<host>>>
   by default rule.
 
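An illustrative hadoop.security.auth_to_local value using the new flag, assuming /L is appended after the sed-style substitution of a rule; the realm and rules are examples only:

----
<property>
  <name>hadoop.security.auth_to_local</name>
  <value>
    RULE:[1:$1@$0](.*@ACME\.COM)s/@.*///L
    RULE:[2:$1@$0](.*@ACME\.COM)s/@.*///L
    DEFAULT
  </value>
</property>
----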
@@ -201,7 +203,9 @@ KVNO Timestamp         Principal
   Some products such as Apache Oozie which access the services of Hadoop
   on behalf of end users need to be able to impersonate end users.
   You can configure proxy user using properties
-  <<<hadoop.proxyuser.${superuser}.hosts>>> and <<<hadoop.proxyuser.${superuser}.groups>>>.
+  <<<hadoop.proxyuser.${superuser}.hosts>>> along with either or both of 
+  <<<hadoop.proxyuser.${superuser}.groups>>>
+  and <<<hadoop.proxyuser.${superuser}.users>>>.
 
   For example, by specifying as below in core-site.xml,
   user named <<<oozie>>> accessing from any host
@@ -218,6 +222,20 @@ KVNO Timestamp         Principal
   </property>
 ----
 
+  A user named <<<oozie>>> accessing from any host
+  can impersonate <<<user1>>> and <<<user2>>> by specifying as below in core-site.xml.
+
+----
+  <property>
+    <name>hadoop.proxyuser.oozie.hosts</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.oozie.users</name>
+    <value>user1,user2</value>
+  </property>
+----
+
 ** Secure DataNode
 
   Because the data transfer protocol of DataNode

+ 23 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathExceptions.java

@@ -19,11 +19,13 @@
 package org.apache.hadoop.fs.shell;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIOException;
+import org.apache.hadoop.ipc.RemoteException;
 import org.junit.Test;
 
 public class TestPathExceptions {
@@ -52,5 +54,25 @@ public class TestPathExceptions {
     assertEquals(new Path(path), pe.getPath());
     assertEquals("`" + path + "': " + error, pe.getMessage());
   }
-  
+
+  @Test
+  public void testRemoteExceptionUnwrap() throws Exception {
+    PathIOException pe;
+    RemoteException re;
+    IOException ie;
+    
+    pe = new PathIOException(path);
+    re = new RemoteException(PathIOException.class.getName(), "test constructor1");
+    ie = re.unwrapRemoteException();
+    assertTrue(ie instanceof PathIOException);
+    ie = re.unwrapRemoteException(PathIOException.class);
+    assertTrue(ie instanceof PathIOException);
+
+    pe = new PathIOException(path, "constructor2");
+    re = new RemoteException(PathIOException.class.getName(), "test constructor2");
+    ie = re.unwrapRemoteException();
+    assertTrue(ie instanceof PathIOException);
+    ie = re.unwrapRemoteException(PathIOException.class);
+    assertTrue(ie instanceof PathIOException);    
+  }
 }

+ 0 - 7
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -259,13 +259,6 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     conn.connect();
     assertEquals(200, conn.getResponseCode());
     assertEquals("text/html; charset=utf-8", conn.getContentType());
-
-    // JSPs should default to text/html with utf8
-    servletUrl = new URL(baseUrl, "/testjsp.jsp");
-    conn = (HttpURLConnection)servletUrl.openConnection();
-    conn.connect();
-    assertEquals(200, conn.getResponseCode());
-    assertEquals("text/html; charset=utf-8", conn.getContentType());
   }
 
   /**

+ 86 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMappingWithDependency.java

@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+
+import junit.framework.TestCase;
+import org.junit.Test;
+
+public class TestScriptBasedMappingWithDependency extends TestCase {
+
+  
+  public TestScriptBasedMappingWithDependency() {
+
+  }
+
+  @Test
+  public void testNoArgsMeansNoResult() {
+    Configuration conf = new Configuration();
+    conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,
+                ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
+    conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename-1");
+    conf.set(ScriptBasedMappingWithDependency.DEPENDENCY_SCRIPT_FILENAME_KEY, 
+        "any-filename-2");
+    conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY, 10);
+
+    ScriptBasedMappingWithDependency mapping = createMapping(conf);
+    List<String> names = new ArrayList<String>();
+    names.add("some.machine.name");
+    names.add("other.machine.name");
+    List<String> result = mapping.resolve(names);
+    assertNull("Expected an empty list for resolve", result);
+    result = mapping.getDependency("some.machine.name");
+    assertNull("Expected an empty list for getDependency", result);
+  }
+
+  @Test
+  public void testNoFilenameMeansSingleSwitch() throws Throwable {
+    Configuration conf = new Configuration();
+    ScriptBasedMapping mapping = createMapping(conf);
+    assertTrue("Expected to be single switch", mapping.isSingleSwitch());
+    assertTrue("Expected to be single switch",
+               AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
+  }
+
+  @Test
+  public void testFilenameMeansMultiSwitch() throws Throwable {
+    Configuration conf = new Configuration();
+    conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
+    ScriptBasedMapping mapping = createMapping(conf);
+    assertFalse("Expected to be multi switch", mapping.isSingleSwitch());
+    mapping.setConf(new Configuration());
+    assertTrue("Expected to be single switch", mapping.isSingleSwitch());
+  }
+
+  @Test
+  public void testNullConfig() throws Throwable {
+    ScriptBasedMapping mapping = createMapping(null);
+    assertTrue("Expected to be single switch", mapping.isSingleSwitch());
+  }
+
+  private ScriptBasedMappingWithDependency createMapping(Configuration conf) {
+    ScriptBasedMappingWithDependency mapping = 
+        new ScriptBasedMappingWithDependency();
+    mapping.setConf(conf);
+    return mapping;
+  }
+}

+ 70 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java

@@ -39,6 +39,7 @@ public class TestProxyUsers {
     LogFactory.getLog(TestProxyUsers.class);
   private static final String REAL_USER_NAME = "proxier";
   private static final String PROXY_USER_NAME = "proxied_user";
+  private static final String AUTHORIZED_PROXY_USER_NAME = "authorized_proxied_user";
   private static final String[] GROUP_NAMES =
     new String[] { "foo_group" };
   private static final String[] NETGROUP_NAMES =
@@ -158,7 +159,41 @@ public class TestProxyUsers {
     // From bad IP
     assertNotAuthorized(proxyUserUgi, "1.2.3.5");
   }
+  
+  @Test
+  public void testProxyUsersWithUserConf() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(
+      ProxyUsers.getProxySuperuserUserConfKey(REAL_USER_NAME),
+      StringUtils.join(",", Arrays.asList(AUTHORIZED_PROXY_USER_NAME)));
+    conf.set(
+      ProxyUsers.getProxySuperuserIpConfKey(REAL_USER_NAME),
+      PROXY_IP);
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
 
+
+    // First try proxying a user that's allowed
+    UserGroupInformation realUserUgi = UserGroupInformation
+        .createRemoteUser(REAL_USER_NAME);
+    UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
+        AUTHORIZED_PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
+
+    // From good IP
+    assertAuthorized(proxyUserUgi, "1.2.3.4");
+    // From bad IP
+    assertNotAuthorized(proxyUserUgi, "1.2.3.5");
+
+    // Now try proxying a user that's not allowed
+    realUserUgi = UserGroupInformation.createRemoteUser(REAL_USER_NAME);
+    proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
+        PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
+    
+    // From good IP
+    assertNotAuthorized(proxyUserUgi, "1.2.3.4");
+    // From bad IP
+    assertNotAuthorized(proxyUserUgi, "1.2.3.5");
+  }
+  
   @Test
   public void testWildcardGroup() {
     Configuration conf = new Configuration();
@@ -192,6 +227,40 @@ public class TestProxyUsers {
     // From bad IP
     assertNotAuthorized(proxyUserUgi, "1.2.3.5");
   }
+  
+  @Test
+  public void testWildcardUser() {
+    Configuration conf = new Configuration();
+    conf.set(
+      ProxyUsers.getProxySuperuserUserConfKey(REAL_USER_NAME),
+      "*");
+    conf.set(
+      ProxyUsers.getProxySuperuserIpConfKey(REAL_USER_NAME),
+      PROXY_IP);
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+
+    // First try proxying a user that's allowed
+    UserGroupInformation realUserUgi = UserGroupInformation
+        .createRemoteUser(REAL_USER_NAME);
+    UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
+        AUTHORIZED_PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
+
+    // From good IP
+    assertAuthorized(proxyUserUgi, "1.2.3.4");
+    // From bad IP
+    assertNotAuthorized(proxyUserUgi, "1.2.3.5");
+
+    // Now try proxying a different user (just to make sure we aren't getting spill over
+    // from the other test case!)
+    realUserUgi = UserGroupInformation.createRemoteUser(REAL_USER_NAME);
+    proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
+        PROXY_USER_NAME, realUserUgi, OTHER_GROUP_NAMES);
+    
+    // From good IP
+    assertAuthorized(proxyUserUgi, "1.2.3.4");
+    // From bad IP
+    assertNotAuthorized(proxyUserUgi, "1.2.3.5");
+  }
 
   @Test
   public void testWildcardIP() {
@@ -282,7 +351,7 @@ public class TestProxyUsers {
     try {
       ProxyUsers.authorize(proxyUgi, host);
     } catch (AuthorizationException e) {
-      fail("Did not allowed authorization of " + proxyUgi + " from " + host);
+      fail("Did not allow authorization of " + proxyUgi + " from " + host);
     }
   }
 }

+ 18 - 13
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java

@@ -39,17 +39,24 @@ import java.util.zip.ZipOutputStream;
  */
 public class JarFinder {
 
-  private static void copyToZipStream(InputStream is, ZipEntry entry,
+  private static void copyToZipStream(File file, ZipEntry entry,
                               ZipOutputStream zos) throws IOException {
-    zos.putNextEntry(entry);
-    byte[] arr = new byte[4096];
-    int read = is.read(arr);
-    while (read > -1) {
-      zos.write(arr, 0, read);
-      read = is.read(arr);
+    InputStream is = new FileInputStream(file);
+    try {
+      zos.putNextEntry(entry);
+      byte[] arr = new byte[4096];
+      int read = is.read(arr);
+      while (read > -1) {
+        zos.write(arr, 0, read);
+        read = is.read(arr);
+      }
+    } finally {
+      try {
+        is.close();
+      } finally {
+        zos.closeEntry();
+      }
     }
-    is.close();
-    zos.closeEntry();
   }
 
   public static void jarDir(File dir, String relativePath, ZipOutputStream zos)
@@ -66,8 +73,7 @@ public class JarFinder {
       new Manifest().write(new BufferedOutputStream(zos));
       zos.closeEntry();
     } else {
-      InputStream is = new FileInputStream(manifestFile);
-      copyToZipStream(is, manifestEntry, zos);
+      copyToZipStream(manifestFile, manifestEntry, zos);
     }
     zos.closeEntry();
     zipDir(dir, relativePath, zos, true);
@@ -94,8 +100,7 @@ public class JarFinder {
           String path = relativePath + f.getName();
           if (!path.equals(JarFile.MANIFEST_NAME)) {
             ZipEntry anEntry = new ZipEntry(path);
-            InputStream is = new FileInputStream(f);
-            copyToZipStream(is, anEntry, zos);
+            copyToZipStream(f, anEntry, zos);
           }
         }
       }

+ 41 - 0
hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml

@@ -0,0 +1,41 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<FindBugsFilter>
+  <!--
+    Findbug is complaining about System.out being NULL
+  -->
+  <Match>
+    <Class name="org.apache.hadoop.crypto.key.kms.server.KMSWebApp"/>
+    <Bug pattern="NP_ALWAYS_NULL"/>
+  </Match>
+  <!--
+    KMSWebApp is a webapp singleton managed by the servlet container via
+    ServletContextListener.
+  -->
+  <Match>
+    <Class name="org.apache.hadoop.crypto.key.kms.server.KMSWebApp"/>
+    <Bug pattern="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD"/>
+  </Match>
+  <!--
+    KMSWebApp does an exit to kill the servlet container if the initialization
+    fails.
+  -->
+  <Match>
+    <Class name="org.apache.hadoop.crypto.key.kms.server.KMSWebApp"/>
+    <Bug pattern="DM_EXIT"/>
+  </Match>
+</FindBugsFilter>

+ 408 - 0
hadoop-common-project/hadoop-kms/pom.xml

@@ -0,0 +1,408 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-kms</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <packaging>war</packaging>
+
+  <name>Apache Hadoop KMS</name>
+  <description>Apache Hadoop KMS</description>
+
+  <properties>
+    <tomcat.version>6.0.36</tomcat.version>
+    <kms.tomcat.dist.dir>
+      ${project.build.directory}/${project.artifactId}-${project.version}/share/hadoop/kms/tomcat
+    </kms.tomcat.dist.dir>
+    <tomcat.download.url>
+      http://archive.apache.org/dist/tomcat/tomcat-6/v${tomcat.version}/bin/apache-tomcat-${tomcat.version}.tar.gz
+    </tomcat.download.url>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-auth</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-core</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-server</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>servlet-api</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.xml.stream</groupId>
+          <artifactId>stax-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-httpclient</groupId>
+          <artifactId>commons-httpclient</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet.jsp</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-api-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>servlet-api-2.5</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>net.java.dev.jets3t</groupId>
+          <artifactId>jets3t</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.eclipse.jdt</groupId>
+          <artifactId>core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-el</groupId>
+          <artifactId>commons-el</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>jul-to-slf4j</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty-util</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.codahale.metrics</groupId>
+      <artifactId>metrics-core</artifactId>
+      <scope>compile</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <forkMode>always</forkMode>
+          <threadCount>1</threadCount>
+          <forkedProcessTimeoutInSeconds>600</forkedProcessTimeoutInSeconds>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
+          <excludes>
+            <exclude>**/${test.exclude}.java</exclude>
+            <exclude>${test.exclude.pattern}</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>create-web-xmls</id>
+            <phase>generate-test-resources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <mkdir dir="${project.build.directory}/test-classes/webapp"/>
+
+                <copy todir="${project.build.directory}/test-classes/webapp">
+                  <fileset dir="${basedir}/src/main/webapp"/>
+                </copy>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-war-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>default-war</id>
+            <phase>package</phase>
+            <goals>
+              <goal>war</goal>
+            </goals>
+            <configuration>
+              <warName>kms</warName>
+              <webappDirectory>${project.build.directory}/kms
+              </webappDirectory>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml
+          </excludeFilterFile>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+  <profiles>
+    <profile>
+      <id>docs</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-site-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>docs</id>
+                <phase>prepare-package</phase>
+                <goals>
+                  <goal>site</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+
+    <profile>
+      <id>dist</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-assembly-plugin</artifactId>
+            <dependencies>
+              <dependency>
+                <groupId>org.apache.hadoop</groupId>
+                <artifactId>hadoop-assemblies</artifactId>
+                <version>${project.version}</version>
+              </dependency>
+            </dependencies>
+            <executions>
+              <execution>
+                <id>dist</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>single</goal>
+                </goals>
+                <configuration>
+                  <finalName>${project.artifactId}-${project.version}
+                  </finalName>
+                  <appendAssemblyId>false</appendAssemblyId>
+                  <attach>false</attach>
+                  <descriptorRefs>
+                    <descriptorRef>hadoop-kms-dist</descriptorRef>
+                  </descriptorRefs>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <!-- Downloading Tomcat TAR.GZ, using downloads/ dir to avoid downloading over an over -->
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>dist</id>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+                <phase>package</phase>
+                <configuration>
+                  <target>
+                    <mkdir dir="downloads"/>
+                    <get
+                      src="${tomcat.download.url}"
+                      dest="downloads/apache-tomcat-${tomcat.version}.tar.gz"
+                      verbose="true" skipexisting="true"/>
+                    <delete dir="${project.build.directory}/tomcat.exp"/>
+                    <mkdir dir="${project.build.directory}/tomcat.exp"/>
+
+                    <!-- Using Unix script to preserve file permissions -->
+                    <echo file="${project.build.directory}/tomcat-untar.sh">
+                      cd "${project.build.directory}/tomcat.exp"
+                      gzip -cd ../../downloads/apache-tomcat-${tomcat.version}.tar.gz | tar xf -
+                    </echo>
+                    <exec executable="sh" dir="${project.build.directory}"
+                          failonerror="true">
+                      <arg line="./tomcat-untar.sh"/>
+                    </exec>
+
+                    <move
+                      file="${project.build.directory}/tomcat.exp/apache-tomcat-${tomcat.version}"
+                      tofile="${kms.tomcat.dist.dir}"/>
+                    <delete dir="${project.build.directory}/tomcat.exp"/>
+                    <delete dir="${kms.tomcat.dist.dir}/webapps"/>
+                    <mkdir dir="${kms.tomcat.dist.dir}/webapps"/>
+                    <delete file="${kms.tomcat.dist.dir}/conf/server.xml"/>
+                    <copy file="${basedir}/src/main/tomcat/server.xml"
+                          toDir="${kms.tomcat.dist.dir}/conf"/>
+                    <delete file="${kms.tomcat.dist.dir}/conf/ssl-server.xml"/>
+                    <copy file="${basedir}/src/main/tomcat/ssl-server.xml"
+                          toDir="${kms.tomcat.dist.dir}/conf"/>
+                    <delete
+                      file="${kms.tomcat.dist.dir}/conf/logging.properties"/>
+                    <copy file="${basedir}/src/main/tomcat/logging.properties"
+                          toDir="${kms.tomcat.dist.dir}/conf"/>
+                    <copy toDir="${kms.tomcat.dist.dir}/webapps/ROOT">
+                      <fileset dir="${basedir}/src/main/tomcat/ROOT"/>
+                    </copy>
+                    <copy toDir="${kms.tomcat.dist.dir}/webapps/kms">
+                      <fileset dir="${project.build.directory}/kms"/>
+                    </copy>
+                  </target>
+                </configuration>
+              </execution>
+              <execution>
+                <id>tar</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+                <configuration>
+                  <target if="tar">
+                    <!-- Using Unix script to preserve symlinks -->
+                    <echo file="${project.build.directory}/dist-maketar.sh">
+                      cd "${project.build.directory}"
+                      tar cf - ${project.artifactId}-${project.version} | gzip > ${project.artifactId}-${project.version}.tar.gz
+                    </echo>
+                    <exec executable="sh" dir="${project.build.directory}"
+                          failonerror="true">
+                      <arg line="./dist-maketar.sh"/>
+                    </exec>
+                  </target>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>

+ 82 - 0
hadoop-common-project/hadoop-kms/src/main/conf/kms-acls.xml

@@ -0,0 +1,82 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+  <!-- This file is hot-reloaded when it changes -->
+
+  <!-- KMS ACLs -->
+
+  <property>
+    <name>hadoop.kms.acl.CREATE</name>
+    <value>*</value>
+    <description>
+      ACL for create-key operations.
+      If the user is not in the GET ACL, the key material is not returned
+      as part of the response.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.DELETE</name>
+    <value>*</value>
+    <description>
+      ACL for delete-key operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.ROLLOVER</name>
+    <value>*</value>
+    <description>
+      ACL for rollover-key operations.
+      If the user is not in the GET ACL, the key material is not returned
+      as part of the response.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET</name>
+    <value>*</value>
+    <description>
+      ACL for get-key-version and get-current-key operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET_KEYS</name>
+    <value>*</value>
+    <description>
+      ACL for get-keys operation.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET_METADATA</name>
+    <value>*</value>
+    <description>
+      ACL for get-key-metadata and get-keys-metadata operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.SET_KEY_MATERIAL</name>
+    <value>*</value>
+    <description>
+      Complementary ACL for the CREATE and ROLLOVER operations that allows the
+      client to provide the key material when creating or rolling a key.
+    </description>
+  </property>
+
+</configuration>
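The ACL values above all default to "*" (everyone). Each value uses Hadoop's AccessControlList syntax: a comma-separated list of users, optionally followed by a space and a comma-separated list of groups. A minimal sketch of how such a value is evaluated; the user and group names are placeholders, not part of this patch:

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AccessControlList;

    public class KmsAclFormatExample {
      public static void main(String[] args) {
        // "alice,bob keyadmins" = users alice and bob, plus members of group keyadmins
        AccessControlList acl = new AccessControlList("alice,bob keyadmins");
        UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");
        System.out.println(acl.isUserAllowed(alice)); // true, alice is listed explicitly
      }
    }

This is the same check the server performs for each hadoop.kms.acl.* property (see KMSACLs.hasAccess further down in this change).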

+ 45 - 0
hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh

@@ -0,0 +1,45 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License. See accompanying LICENSE file.
+#
+
+# Set kms specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs KMS
+# Java System properties for KMS should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# KMS logs directory
+#
+# export KMS_LOG=${KMS_HOME}/logs
+
+# KMS temporary directory
+#
+# export KMS_TEMP=${KMS_HOME}/temp
+
+# The HTTP port used by KMS
+#
+# export KMS_HTTP_PORT=16000
+
+# The Admin port used by KMS
+#
+# export KMS_ADMIN_PORT=`expr ${KMS_HTTP_PORT} + 1`
+
+# The location of the SSL keystore if using SSL
+#
+# export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
+
+# The password of the SSL keystore if using SSL
+#
+# export KMS_SSL_KEYSTORE_PASS=password

+ 38 - 0
hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties

@@ -0,0 +1,38 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java system property 'kms.log.dir' is not defined at KMS start-up time,
+# the KMS setup sets its value to '${kms.home}/logs'
+
+log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms.File=${kms.log.dir}/kms.log
+log4j.appender.kms.Append=true
+log4j.appender.kms.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} - %m%n
+
+log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms-audit.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms-audit.File=${kms.log.dir}/kms-audit.log
+log4j.appender.kms-audit.Append=true
+log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms-audit.layout.ConversionPattern=%d{ISO8601} %m%n
+
+log4j.logger.kms-audit=INFO, kms-audit
+log4j.additivity.kms-audit=false
+
+log4j.rootLogger=ALL, kms
+log4j.logger.org.apache.hadoop.conf=ERROR
+log4j.logger.org.apache.hadoop=INFO
+log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF

+ 71 - 0
hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml

@@ -0,0 +1,71 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+  <!-- KMS Backend KeyProvider -->
+  <property>
+    <name>hadoop.security.key.provider.path</name>
+    <value>jceks://file@/${user.home}/kms.keystore</value>
+    <description>
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.security.keystore.JavaKeyStoreProvider.password</name>
+    <value>none</value>
+    <description>
+    </description>
+  </property>
+
+  <!-- KMS Cache -->
+  <property>
+    <name>hadoop.kms.cache.timeout.ms</name>
+    <value>10000</value>
+    <description>
+    </description>
+  </property>
+
+  <!-- KMS Security -->
+
+  <property>
+    <name>hadoop.kms.authentication.type</name>
+    <value>simple</value>
+    <description>
+      simple or kerberos
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.keytab</name>
+    <value>${user.home}/kms.keytab</value>
+    <description>
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.principal</name>
+    <value>HTTP/localhost</value>
+    <description>
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.name.rules</name>
+    <value>DEFAULT</value>
+    <description>
+    </description>
+  </property>
+
+</configuration>

+ 305 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java

@@ -0,0 +1,305 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
+import org.apache.hadoop.util.StringUtils;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.SecurityContext;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.security.Principal;
+import java.text.MessageFormat;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Class providing the REST bindings, via Jersey, for the KMS.
+ */
+@Path(KMSRESTConstants.SERVICE_VERSION)
+@InterfaceAudience.Private
+public class KMS {
+  private static final String CREATE_KEY = "CREATE_KEY";
+  private static final String DELETE_KEY = "DELETE_KEY";
+  private static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION";
+  private static final String GET_KEYS = "GET_KEYS";
+  private static final String GET_KEYS_METADATA = "GET_KEYS_METADATA";
+  private static final String GET_KEY_VERSION = "GET_KEY_VERSION";
+  private static final String GET_CURRENT_KEY = "GET_CURRENT_KEY";
+  private static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS";
+  private static final String GET_METADATA = "GET_METADATA";
+
+  private KeyProvider provider;
+
+  public KMS() throws Exception {
+    provider = KMSWebApp.getKeyProvider();
+  }
+
+  private static Principal getPrincipal(SecurityContext securityContext)
+      throws AuthenticationException{
+    Principal user = securityContext.getUserPrincipal();
+    if (user == null) {
+      throw new AuthenticationException("User must be authenticated");
+    }
+    return user;
+  }
+
+  private static void assertAccess(KMSACLs.Type aclType, Principal principal,
+      String operation, String key) throws AccessControlException {
+    if (!KMSWebApp.getACLs().hasAccess(aclType, principal.getName())) {
+      KMSWebApp.getUnauthorizedCallsMeter().mark();
+      KMSAudit.unauthorized(principal, operation, key);
+      throw new AuthorizationException(MessageFormat.format(
+          "User:{0} not allowed to do ''{1}'' on ''{2}''",
+          principal.getName(), operation, key));
+    }
+  }
+
+  private static KeyProvider.KeyVersion removeKeyMaterial(
+      KeyProvider.KeyVersion keyVersion) {
+    return new KMSClientProvider.KMSKeyVersion(keyVersion.getVersionName(),
+        null);
+  }
+
+  private static URI getKeyURI(String name) throws URISyntaxException {
+    return new URI(KMSRESTConstants.SERVICE_VERSION + "/" +
+        KMSRESTConstants.KEY_RESOURCE + "/" + name);
+  }
+
+  @POST
+  @Path(KMSRESTConstants.KEYS_RESOURCE)
+  @Consumes(MediaType.APPLICATION_JSON)
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response createKey(@Context SecurityContext securityContext,
+      Map jsonKey) throws Exception {
+    KMSWebApp.getAdminCallsMeter().mark();
+    Principal user = getPrincipal(securityContext);
+    String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
+    KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
+    assertAccess(KMSACLs.Type.CREATE, user, CREATE_KEY, name);
+    String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
+    String material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
+    int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
+                 ? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
+    String description = (String)
+        jsonKey.get(KMSRESTConstants.DESCRIPTION_FIELD);
+
+    if (material != null) {
+      assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
+          CREATE_KEY + " with user provided material", name);
+    }
+    KeyProvider.Options options = new KeyProvider.Options(
+        KMSWebApp.getConfiguration());
+    if (cipher != null) {
+      options.setCipher(cipher);
+    }
+    if (length != 0) {
+      options.setBitLength(length);
+    }
+    options.setDescription(description);
+
+    KeyProvider.KeyVersion keyVersion = (material != null)
+        ? provider.createKey(name, Base64.decodeBase64(material), options)
+        : provider.createKey(name, options);
+
+    provider.flush();
+
+    KMSAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" +
+        (material != null) + " Description:" + description);
+
+    if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
+      keyVersion = removeKeyMaterial(keyVersion);
+    }
+    Map json = KMSServerJSONUtils.toJSON(keyVersion);
+    String requestURL = KMSMDCFilter.getURL();
+    int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
+    requestURL = requestURL.substring(0, idx);
+    String keyURL = requestURL + KMSRESTConstants.KEY_RESOURCE + "/" + name;
+    return Response.created(getKeyURI(name)).type(MediaType.APPLICATION_JSON).
+        header("Location", keyURL).entity(json).build();
+  }
+
+  @DELETE
+  @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
+  public Response deleteKey(@Context SecurityContext securityContext,
+      @PathParam("name") String name) throws Exception {
+    KMSWebApp.getAdminCallsMeter().mark();
+    Principal user = getPrincipal(securityContext);
+    assertAccess(KMSACLs.Type.DELETE, user, DELETE_KEY, name);
+    KMSClientProvider.checkNotEmpty(name, "name");
+    provider.deleteKey(name);
+    provider.flush();
+
+    KMSAudit.ok(user, DELETE_KEY, name, "");
+
+    return Response.ok().build();
+  }
+
+  @POST
+  @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
+  @Consumes(MediaType.APPLICATION_JSON)
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response rolloverKey(@Context SecurityContext securityContext,
+      @PathParam("name") String name, Map jsonMaterial)
+      throws Exception {
+    KMSWebApp.getAdminCallsMeter().mark();
+    Principal user = getPrincipal(securityContext);
+    assertAccess(KMSACLs.Type.ROLLOVER, user, ROLL_NEW_VERSION, name);
+    KMSClientProvider.checkNotEmpty(name, "name");
+    String material = (String)
+        jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD);
+    if (material != null) {
+      assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
+          ROLL_NEW_VERSION + " with user provided material", name);
+    }
+    KeyProvider.KeyVersion keyVersion = (material != null)
+        ? provider.rollNewVersion(name, Base64.decodeBase64(material))
+        : provider.rollNewVersion(name);
+
+    provider.flush();
+
+    KMSAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
+        (material != null) + " NewVersion:" + keyVersion.getVersionName());
+
+    if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
+      keyVersion = removeKeyMaterial(keyVersion);
+    }
+    Map json = KMSServerJSONUtils.toJSON(keyVersion);
+    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+  }
+
+  @GET
+  @Path(KMSRESTConstants.KEYS_METADATA_RESOURCE)
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response getKeysMetadata(@Context SecurityContext securityContext,
+      @QueryParam(KMSRESTConstants.KEY_OP) List<String> keyNamesList)
+      throws Exception {
+    KMSWebApp.getAdminCallsMeter().mark();
+    Principal user = getPrincipal(securityContext);
+    String[] keyNames = keyNamesList.toArray(new String[keyNamesList.size()]);
+    String names = StringUtils.arrayToString(keyNames);
+    assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA, names);
+    KeyProvider.Metadata[] keysMeta = provider.getKeysMetadata(keyNames);
+    Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
+    KMSAudit.ok(user, GET_KEYS_METADATA, names, "");
+    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+  }
+
+  @GET
+  @Path(KMSRESTConstants.KEYS_NAMES_RESOURCE)
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response getKeyNames(@Context SecurityContext securityContext)
+      throws Exception {
+    KMSWebApp.getAdminCallsMeter().mark();
+    Principal user = getPrincipal(securityContext);
+    assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS, "*");
+    Object json = provider.getKeys();
+    KMSAudit.ok(user, GET_KEYS, "*", "");
+    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+  }
+
+  @GET
+  @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
+  public Response getKey(@Context SecurityContext securityContext,
+      @PathParam("name") String name)
+      throws Exception {
+    return getMetadata(securityContext, name);
+  }
+
+  @GET
+  @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
+      KMSRESTConstants.METADATA_SUB_RESOURCE)
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response getMetadata(@Context SecurityContext securityContext,
+      @PathParam("name") String name)
+      throws Exception {
+    Principal user = getPrincipal(securityContext);
+    KMSClientProvider.checkNotEmpty(name, "name");
+    KMSWebApp.getAdminCallsMeter().mark();
+    assertAccess(KMSACLs.Type.GET_METADATA, user, GET_METADATA, name);
+    Object json = KMSServerJSONUtils.toJSON(name, provider.getMetadata(name));
+    KMSAudit.ok(user, GET_METADATA, name, "");
+    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+  }
+
+  @GET
+  @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
+      KMSRESTConstants.CURRENT_VERSION_SUB_RESOURCE)
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response getCurrentVersion(@Context SecurityContext securityContext,
+      @PathParam("name") String name)
+      throws Exception {
+    Principal user = getPrincipal(securityContext);
+    KMSClientProvider.checkNotEmpty(name, "name");
+    KMSWebApp.getKeyCallsMeter().mark();
+    assertAccess(KMSACLs.Type.GET, user, GET_CURRENT_KEY, name);
+    Object json = KMSServerJSONUtils.toJSON(provider.getCurrentKey(name));
+    KMSAudit.ok(user, GET_CURRENT_KEY, name, "");
+    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+  }
+
+  @GET
+  @Path(KMSRESTConstants.KEY_VERSION_RESOURCE + "/{versionName:.*}")
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response getKeyVersion(@Context SecurityContext securityContext,
+      @PathParam("versionName") String versionName)
+      throws Exception {
+    Principal user = getPrincipal(securityContext);
+    KMSClientProvider.checkNotEmpty(versionName, "versionName");
+    KMSWebApp.getKeyCallsMeter().mark();
+    assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION, versionName);
+    Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersion(versionName));
+    KMSAudit.ok(user, GET_KEY_VERSION, versionName, "");
+    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+  }
+
+  @GET
+  @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
+      KMSRESTConstants.VERSIONS_SUB_RESOURCE)
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response getKeyVersions(@Context SecurityContext securityContext,
+      @PathParam("name") String name)
+      throws Exception {
+    Principal user = getPrincipal(securityContext);
+    KMSClientProvider.checkNotEmpty(name, "name");
+    KMSWebApp.getKeyCallsMeter().mark();
+    assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSIONS, name);
+    Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersions(name));
+    KMSAudit.ok(user, GET_KEY_VERSIONS, name, "");
+    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+  }
+
+}
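These REST bindings are consumed on the client side by the KMSClientProvider imported above. A hedged sketch of creating a key through the generic KeyProvider API against a locally running KMS; the kms:// URI, key name and cipher settings are illustrative assumptions (the port matches the KMS_HTTP_PORT default in kms-env.sh):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class KmsClientExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // kms://http@localhost:16000/kms resolves to http://localhost:16000/kms
        KeyProvider provider =
            KeyProviderFactory.get(new URI("kms://http@localhost:16000/kms"), conf);
        KeyProvider.Options options = new KeyProvider.Options(conf);
        options.setBitLength(128);                // assumed key length
        options.setCipher("AES/CTR/NoPadding");   // assumed cipher
        KeyProvider.KeyVersion kv = provider.createKey("key1", options);
        System.out.println(kv.getVersionName());
      }
    }

The same provider object then answers getCurrentKey, getKeyVersion, rollNewVersion and deleteKey by calling the corresponding resources defined in this class.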

+ 133 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java

@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Provides access to the <code>AccessControlList</code>s used by KMS,
+ * hot-reloading them if the <code>kms-acls.xml</code> file where the ACLs
+ * are defined has been updated.
+ */
+public class KMSACLs implements Runnable {
+  private static final Logger LOG = LoggerFactory.getLogger(KMSACLs.class);
+
+
+  public enum Type {
+    CREATE, DELETE, ROLLOVER, GET, GET_KEYS, GET_METADATA, SET_KEY_MATERIAL;
+
+    public String getConfigKey() {
+      return KMSConfiguration.CONFIG_PREFIX + "acl." + this.toString();
+    }
+  }
+
+  public static final String ACL_DEFAULT = AccessControlList.WILDCARD_ACL_VALUE;
+
+  public static final int RELOADER_SLEEP_MILLIS = 1000;
+
+  Map<Type, AccessControlList> acls;
+  private ReadWriteLock lock;
+  private ScheduledExecutorService executorService;
+  private long lastReload;
+
+  KMSACLs(Configuration conf) {
+    lock = new ReentrantReadWriteLock();
+    if (conf == null) {
+      conf = loadACLs();
+    }
+    setACLs(conf);
+  }
+
+  public KMSACLs() {
+    this(null);
+  }
+
+  private void setACLs(Configuration conf) {
+    lock.writeLock().lock();
+    try {
+      acls = new HashMap<Type, AccessControlList>();
+      for (Type aclType : Type.values()) {
+        String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT);
+        acls.put(aclType, new AccessControlList(aclStr));
+        LOG.info("'{}' ACL '{}'", aclType, aclStr);
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void run() {
+    try {
+      if (KMSConfiguration.isACLsFileNewer(lastReload)) {
+        setACLs(loadACLs());
+      }
+    } catch (Exception ex) {
+      LOG.warn("Could not reload ACLs file: " + ex.toString(), ex);
+    }
+  }
+
+  public synchronized void startReloader() {
+    if (executorService == null) {
+      executorService = Executors.newScheduledThreadPool(1);
+      executorService.scheduleAtFixedRate(this, RELOADER_SLEEP_MILLIS,
+          RELOADER_SLEEP_MILLIS, TimeUnit.MILLISECONDS);
+    }
+  }
+
+  public synchronized void stopReloader() {
+    if (executorService != null) {
+      executorService.shutdownNow();
+      executorService = null;
+    }
+  }
+
+  private Configuration loadACLs() {
+    LOG.debug("Loading ACLs file");
+    lastReload = System.currentTimeMillis();
+    Configuration conf = KMSConfiguration.getACLsConf();
+    // triggering the resource loading.
+    conf.get(Type.CREATE.getConfigKey());
+    return conf;
+  }
+
+  public boolean hasAccess(Type type, String user) {
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
+    AccessControlList acl = null;
+    lock.readLock().lock();
+    try {
+      acl = acls.get(type);
+    } finally {
+      lock.readLock().unlock();
+    }
+    return acl.isUserAllowed(ugi);
+  }
+
+}
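Each Type value maps one-to-one onto the properties in kms-acls.xml: Type.CREATE.getConfigKey() resolves to "hadoop.kms.acl.CREATE", and so on. A small sketch of the resulting access check; the package declaration is needed because the KMSACLs constructor is package-private, and the user and group names are placeholders:

    package org.apache.hadoop.crypto.key.kms.server;

    import org.apache.hadoop.conf.Configuration;

    public class KmsAclsExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // restrict key creation to user alice and members of group keyadmins
        conf.set(KMSACLs.Type.CREATE.getConfigKey(), "alice keyadmins");
        KMSACLs acls = new KMSACLs(conf);
        System.out.println(acls.hasAccess(KMSACLs.Type.CREATE, "alice"));   // true
        System.out.println(acls.hasAccess(KMSACLs.Type.DELETE, "mallory")); // true: DELETE falls back to "*"
      }
    }

When no Configuration is passed (the no-arg constructor), the ACLs are read from kms-acls.xml, and the reloader thread started by startReloader() checks roughly once per second whether that file has changed.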

+ 62 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java

@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.security.Principal;
+
+/**
+ * Provides convenience methods for consistently audit-logging the different
+ * types of events.
+ */
+public class KMSAudit {
+  public static final String KMS_LOGGER_NAME = "kms-audit";
+
+  private static Logger AUDIT_LOG = LoggerFactory.getLogger(KMS_LOGGER_NAME);
+
+  private static void op(String status, String op, Principal user, String key,
+      String extraMsg) {
+    AUDIT_LOG.info("Status:{} User:{} Op:{} Name:{}{}", status, user.getName(),
+        op, key, extraMsg);
+  }
+
+  public static void ok(Principal user, String op, String key,
+      String extraMsg) {
+    op("OK", op, user, key, extraMsg);
+  }
+
+  public static void unauthorized(Principal user, String op, String key) {
+    op("UNAUTHORIZED", op, user, key, "");
+  }
+
+  public static void error(Principal user, String method, String url,
+      String extraMsg) {
+    AUDIT_LOG.info("Status:ERROR User:{} Method:{} URL:{} Exception:'{}'",
+        user.getName(), method, url, extraMsg);
+  }
+
+  public static void unauthenticated(String remoteHost, String method,
+      String url, String extraMsg) {
+    AUDIT_LOG.info(
+        "Status:UNAUTHENTICATED RemoteHost:{} Method:{} URL:{} ErrorMsg:'{}'",
+        remoteHost, method, url, extraMsg);
+  }
+
+}
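Combined with the kms-audit appender and pattern defined in kms-log4j.properties above, each call produces a single audit line. A sketch; the Principal implementation, names and timestamp are illustrative:

    import java.security.Principal;
    import org.apache.hadoop.crypto.key.kms.server.KMSAudit;

    public class KmsAuditExample {
      public static void main(String[] args) {
        Principal alice = new Principal() {
          @Override
          public String getName() { return "alice"; }
        };
        // With the ISO8601 pattern this logs a line of the form:
        //   2014-05-13 10:15:00,123 Status:UNAUTHORIZED User:alice Op:CREATE_KEY Name:key1
        KMSAudit.unauthorized(alice, "CREATE_KEY", "key1");
      }
    }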

+ 123 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java

@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.servlet.http.HttpServletResponseWrapper;
+import java.io.IOException;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Authentication filter that takes the configuration from the KMS configuration
+ * file.
+ */
+@InterfaceAudience.Private
+public class KMSAuthenticationFilter extends AuthenticationFilter {
+  private static final String CONF_PREFIX = KMSConfiguration.CONFIG_PREFIX +
+      "authentication.";
+
+  @Override
+  protected Properties getConfiguration(String configPrefix,
+      FilterConfig filterConfig) {
+    Properties props = new Properties();
+    Configuration conf = KMSWebApp.getConfiguration();
+    for (Map.Entry<String, String> entry : conf) {
+      String name = entry.getKey();
+      if (name.startsWith(CONF_PREFIX)) {
+        String value = conf.get(name);
+        name = name.substring(CONF_PREFIX.length());
+        props.setProperty(name, value);
+      }
+    }
+    return props;
+  }
+
+  private static class KMSResponse extends HttpServletResponseWrapper {
+    public int statusCode;
+    public String msg;
+
+    public KMSResponse(ServletResponse response) {
+      super((HttpServletResponse)response);
+    }
+
+    @Override
+    public void setStatus(int sc) {
+      statusCode = sc;
+      super.setStatus(sc);
+    }
+
+    @Override
+    public void sendError(int sc, String msg) throws IOException {
+      statusCode = sc;
+      this.msg = msg;
+      super.sendError(sc, msg);
+    }
+
+    @Override
+    public void sendError(int sc) throws IOException {
+      statusCode = sc;
+      super.sendError(sc);
+    }
+
+    @Override
+    public void setStatus(int sc, String sm) {
+      statusCode = sc;
+      msg = sm;
+      super.setStatus(sc, sm);
+    }
+  }
+
+  @Override
+  public void doFilter(ServletRequest request, ServletResponse response,
+      FilterChain filterChain) throws IOException, ServletException {
+    KMSResponse kmsResponse = new KMSResponse(response);
+    super.doFilter(request, kmsResponse, filterChain);
+
+    if (kmsResponse.statusCode != HttpServletResponse.SC_OK &&
+        kmsResponse.statusCode != HttpServletResponse.SC_CREATED &&
+        kmsResponse.statusCode != HttpServletResponse.SC_UNAUTHORIZED) {
+      KMSWebApp.getInvalidCallsMeter().mark();
+    }
+
+    // HttpServletResponse.SC_UNAUTHORIZED means the request does not
+    // belong to an authenticated user.
+    if (kmsResponse.statusCode == HttpServletResponse.SC_UNAUTHORIZED) {
+      KMSWebApp.getUnauthenticatedCallsMeter().mark();
+      String method = ((HttpServletRequest) request).getMethod();
+      StringBuffer requestURL = ((HttpServletRequest) request).getRequestURL();
+      String queryString = ((HttpServletRequest) request).getQueryString();
+      if (queryString != null) {
+        requestURL.append("?").append(queryString);
+      }
+      KMSAudit.unauthenticated(request.getRemoteHost(), method,
+          requestURL.toString(), kmsResponse.msg);
+    }
+  }
+
+}

+ 180 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSCacheKeyProvider.java

@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import org.apache.hadoop.crypto.key.KeyProvider;
+
+import java.io.IOException;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A <code>KeyProvider</code> proxy implementation providing a short-lived
+ * cache for <code>KeyVersion</code>s so that bursts of requests do not hit
+ * the underlying <code>KeyProvider</code>.
+ */
+public class KMSCacheKeyProvider extends KeyProvider {
+  private final KeyProvider provider;
+  private LoadingCache<String, KeyVersion> keyVersionCache;
+  private LoadingCache<String, KeyVersion> currentKeyCache;
+
+  private static class KeyNotFoundException extends Exception {
+    private static final long serialVersionUID = 1L;
+  }
+
+  public KMSCacheKeyProvider(KeyProvider prov, long timeoutMillis) {
+    this.provider =  prov;
+    keyVersionCache = CacheBuilder.newBuilder().expireAfterAccess(timeoutMillis,
+        TimeUnit.MILLISECONDS).build(new CacheLoader<String, KeyVersion>() {
+      @Override
+      public KeyVersion load(String key) throws Exception {
+        KeyVersion kv = provider.getKeyVersion(key);
+        if (kv == null) {
+          throw new KeyNotFoundException();
+        }
+        return kv;
+      }
+    });
+    // for the current key we don't want the cache to be stale for more than 1 sec
+    currentKeyCache = CacheBuilder.newBuilder().expireAfterWrite(1000,
+        TimeUnit.MILLISECONDS).build(new CacheLoader<String, KeyVersion>() {
+      @Override
+      public KeyVersion load(String key) throws Exception {
+        KeyVersion kv =  provider.getCurrentKey(key);
+        if (kv == null) {
+          throw new KeyNotFoundException();
+        }
+        return kv;
+      }
+    });
+  }
+
+  @Override
+  public KeyVersion getCurrentKey(String name) throws IOException {
+    try {
+      return currentKeyCache.get(name);
+    } catch (ExecutionException ex) {
+      Throwable cause = ex.getCause();
+      if (cause instanceof KeyNotFoundException) {
+        return null;
+      } else if (cause instanceof IOException) {
+        throw (IOException) cause;
+      } else {
+        throw new IOException(cause);
+      }
+    }
+  }
+
+  @Override
+  public KeyVersion getKeyVersion(String versionName)
+      throws IOException {
+    try {
+      return keyVersionCache.get(versionName);
+    } catch (ExecutionException ex) {
+      Throwable cause = ex.getCause();
+      if (cause instanceof KeyNotFoundException) {
+        return null;
+      } else if (cause instanceof IOException) {
+        throw (IOException) cause;
+      } else {
+        throw new IOException(cause);
+      }
+    }
+  }
+
+  @Override
+  public List<String> getKeys() throws IOException {
+    return provider.getKeys();
+  }
+
+  @Override
+  public List<KeyVersion> getKeyVersions(String name)
+      throws IOException {
+    return provider.getKeyVersions(name);
+  }
+
+  @Override
+  public Metadata getMetadata(String name) throws IOException {
+    return provider.getMetadata(name);
+  }
+
+  @Override
+  public KeyVersion createKey(String name, byte[] material,
+      Options options) throws IOException {
+    return provider.createKey(name, material, options);
+  }
+
+  @Override
+  public KeyVersion createKey(String name,
+      Options options)
+      throws NoSuchAlgorithmException, IOException {
+    return provider.createKey(name, options);
+  }
+
+  @Override
+  public void deleteKey(String name) throws IOException {
+    Metadata metadata = provider.getMetadata(name);
+    List<String> versions = new ArrayList<String>(metadata.getVersions());
+    for (int i = 0; i < metadata.getVersions(); i++) {
+      versions.add(KeyProvider.buildVersionName(name, i));
+    }
+    provider.deleteKey(name);
+    currentKeyCache.invalidate(name);
+    keyVersionCache.invalidateAll(versions);
+  }
+
+  @Override
+  public KeyVersion rollNewVersion(String name, byte[] material)
+      throws IOException {
+    KeyVersion key = provider.rollNewVersion(name, material);
+    currentKeyCache.invalidate(name);
+    return key;
+  }
+
+  @Override
+  public KeyVersion rollNewVersion(String name)
+      throws NoSuchAlgorithmException, IOException {
+    KeyVersion key = provider.rollNewVersion(name);
+    currentKeyCache.invalidate(name);
+    return key;
+  }
+
+  @Override
+  public void flush() throws IOException {
+    provider.flush();
+  }
+
+  @Override
+  public Metadata[] getKeysMetadata(String ... keyNames)
+      throws IOException {
+    return provider.getKeysMetadata(keyNames);
+  }
+
+  @Override
+  public boolean isTransient() {
+    return provider.isTransient();
+  }
+
+}
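A sketch of wrapping a backing provider in the cache; the jceks URI follows the format used in kms-site.xml above, and the 10 s timeout matches KMSConfiguration.KEY_CACHE_TIMEOUT_DEFAULT (both values are assumptions for illustration):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;
    import org.apache.hadoop.crypto.key.kms.server.KMSCacheKeyProvider;

    public class KmsCacheExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        KeyProvider backing =
            KeyProviderFactory.get(new URI("jceks://file@/tmp/kms.keystore"), conf);
        KeyProvider cached = new KMSCacheKeyProvider(backing, 10000);
        // Key versions are served from the cache for up to 10 s after last access;
        // the current key is cached for at most 1 s after it is loaded.
        System.out.println(cached.getCurrentKey("key1"));
      }
    }

Mutating calls pass straight through to the backing provider; rollNewVersion and deleteKey also invalidate the affected cache entries, so stale reads are bounded by the timeouts above.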

+ 94 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java

@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.apache.hadoop.conf.Configuration;
+
+import java.io.File;
+import java.net.MalformedURLException;
+import java.net.URL;
+
+/**
+ * Utility class to load KMS configuration files.
+ */
+public class KMSConfiguration {
+
+  public static final String KMS_CONFIG_DIR = "kms.config.dir";
+  public static final String KMS_SITE_XML = "kms-site.xml";
+  public static final String KMS_ACLS_XML = "kms-acls.xml";
+
+  public static final String CONFIG_PREFIX = "hadoop.kms.";
+
+  public static final String KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX +
+      "cache.timeout.ms";
+  public static final long KEY_CACHE_TIMEOUT_DEFAULT = 10 * 1000; // 10 secs
+
+  static Configuration getConfiguration(boolean loadHadoopDefaults,
+      String ... resources) {
+    Configuration conf = new Configuration(loadHadoopDefaults);
+    String confDir = System.getProperty(KMS_CONFIG_DIR);
+    if (confDir != null) {
+      try {
+        if (!confDir.startsWith("/")) {
+          throw new RuntimeException("System property '" + KMS_CONFIG_DIR +
+              "' must be an absolute path: " + confDir);
+        }
+        if (!confDir.endsWith("/")) {
+          confDir += "/";
+        }
+        for (String resource : resources) {
+          conf.addResource(new URL("file://" + confDir + resource));
+        }
+      } catch (MalformedURLException ex) {
+        throw new RuntimeException(ex);
+      }
+    } else {
+      for (String resource : resources) {
+        conf.addResource(resource);
+      }
+    }
+    return conf;
+  }
+
+  public static Configuration getKMSConf() {
+    return getConfiguration(true, "core-site.xml", KMS_SITE_XML);
+  }
+
+  public static Configuration getACLsConf() {
+    return getConfiguration(false, KMS_ACLS_XML);
+  }
+
+  public static boolean isACLsFileNewer(long time) {
+    boolean newer = false;
+    String confDir = System.getProperty(KMS_CONFIG_DIR);
+    if (confDir != null) {
+      if (!confDir.startsWith("/")) {
+        throw new RuntimeException("System property '" + KMS_CONFIG_DIR +
+            "' must be an absolute path: " + confDir);
+      }
+      if (!confDir.endsWith("/")) {
+        confDir += "/";
+      }
+      File f = new File(confDir, KMS_ACLS_XML);
+      // require the file to be at least 100ms newer than 'time'; this ensures
+      // the file has been properly closed/flushed
+      newer = f.lastModified() - time > 100;
+    }
+    return newer;
+  }
+}
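The ACL and site configuration used elsewhere in this change is loaded through this class. The configuration directory comes from the kms.config.dir system property, which the KMS startup scripts are expected to set; a sketch with an assumed path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.kms.server.KMSConfiguration;

    public class KmsConfExample {
      public static void main(String[] args) {
        // Must be an absolute path; "/etc/hadoop-kms/conf" is an assumption.
        System.setProperty(KMSConfiguration.KMS_CONFIG_DIR, "/etc/hadoop-kms/conf");
        // Loads core-site.xml and kms-site.xml from that directory.
        Configuration conf = KMSConfiguration.getKMSConf();
        long timeout = conf.getLong(KMSConfiguration.KEY_CACHE_TIMEOUT_KEY,
            KMSConfiguration.KEY_CACHE_TIMEOUT_DEFAULT);
        System.out.println(timeout);
      }
    }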

+ 113 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java

@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import com.sun.jersey.api.container.ContainerException;
+import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.ext.ExceptionMapper;
+import javax.ws.rs.ext.Provider;
+import java.io.IOException;
+import java.security.Principal;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * Jersey provider that converts KMS exceptions into detailed HTTP errors.
+ */
+@Provider
+@InterfaceAudience.Private
+public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
+  private static Logger LOG =
+      LoggerFactory.getLogger(KMSExceptionsProvider.class);
+
+  private static final String ENTER = System.getProperty("line.separator");
+
+  protected Response createResponse(Response.Status status, Throwable ex) {
+    Map<String, Object> json = new LinkedHashMap<String, Object>();
+    json.put(KMSRESTConstants.ERROR_EXCEPTION_JSON, ex.getClass().getName());
+    json.put(KMSRESTConstants.ERROR_MESSAGE_JSON, getOneLineMessage(ex));
+    log(status, ex);
+    return Response.status(status).type(MediaType.APPLICATION_JSON).
+        entity(json).build();
+  }
+
+  protected String getOneLineMessage(Throwable exception) {
+    String message = exception.getMessage();
+    if (message != null) {
+      int i = message.indexOf(ENTER);
+      if (i > -1) {
+        message = message.substring(0, i);
+      }
+    }
+    return message;
+  }
+
+  /**
+   * Maps different exceptions thrown by KMS to HTTP status codes.
+   */
+  @Override
+  public Response toResponse(Exception exception) {
+    Response.Status status;
+    boolean doAudit = true;
+    Throwable throwable = exception;
+    if (exception instanceof ContainerException) {
+      throwable = exception.getCause();
+    }
+    if (throwable instanceof SecurityException) {
+      status = Response.Status.FORBIDDEN;
+    } else if (throwable instanceof AuthenticationException) {
+      status = Response.Status.FORBIDDEN;
+      // we don't audit here because we did it already when checking access
+      doAudit = false;
+    } else if (throwable instanceof AccessControlException) {
+      status = Response.Status.FORBIDDEN;
+    } else if (exception instanceof IOException) {
+      status = Response.Status.INTERNAL_SERVER_ERROR;
+    } else if (exception instanceof UnsupportedOperationException) {
+      status = Response.Status.BAD_REQUEST;
+    } else if (exception instanceof IllegalArgumentException) {
+      status = Response.Status.BAD_REQUEST;
+    } else {
+      status = Response.Status.INTERNAL_SERVER_ERROR;
+    }
+    if (doAudit) {
+      KMSAudit.error(KMSMDCFilter.getPrincipal(), KMSMDCFilter.getMethod(),
+          KMSMDCFilter.getURL(), getOneLineMessage(exception));
+    }
+    return createResponse(status, throwable);
+  }
+
+  protected void log(Response.Status status, Throwable ex) {
+    Principal principal = KMSMDCFilter.getPrincipal();
+    String method = KMSMDCFilter.getMethod();
+    String url = KMSMDCFilter.getURL();
+    String msg = getOneLineMessage(ex);
+    LOG.warn("User:{} Method:{} URL:{} Response:{}-{}", principal, method, url,
+        status, msg, ex);
+  }
+
+}

+ 54 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONReader.java

@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyReader;
+import javax.ws.rs.ext.Provider;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+import java.util.Map;
+
+@Provider
+@Consumes(MediaType.APPLICATION_JSON)
+@InterfaceAudience.Private
+public class KMSJSONReader implements MessageBodyReader<Map> {
+
+  @Override
+  public boolean isReadable(Class<?> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType) {
+    return type.isAssignableFrom(Map.class);
+  }
+
+  @Override
+  public Map readFrom(Class<Map> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType,
+      MultivaluedMap<String, String> httpHeaders, InputStream entityStream)
+      throws IOException, WebApplicationException {
+    ObjectMapper mapper = new ObjectMapper();
+    return mapper.readValue(entityStream, type);
+  }
+}

+ 70 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java

@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Jersey provider that converts <code>Map</code>s and <code>List</code>s
+ * to their JSON representation.
+ */
+@Provider
+@Produces(MediaType.APPLICATION_JSON)
+@InterfaceAudience.Private
+public class KMSJSONWriter implements MessageBodyWriter<Object> {
+
+  @Override
+  public boolean isWriteable(Class<?> aClass, Type type,
+      Annotation[] annotations, MediaType mediaType) {
+    return Map.class.isAssignableFrom(aClass) ||
+        List.class.isAssignableFrom(aClass);
+  }
+
+  @Override
+  public long getSize(Object obj, Class<?> aClass, Type type,
+      Annotation[] annotations, MediaType mediaType) {
+    return -1;
+  }
+
+  @Override
+  public void writeTo(Object obj, Class<?> aClass, Type type,
+      Annotation[] annotations, MediaType mediaType,
+      MultivaluedMap<String, Object> stringObjectMultivaluedMap,
+      OutputStream outputStream) throws IOException, WebApplicationException {
+    Writer writer = new OutputStreamWriter(outputStream);
+    ObjectMapper jsonMapper = new ObjectMapper();
+    jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, obj);
+  }
+
+}

+ 92 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java

@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import java.io.IOException;
+import java.security.Principal;
+
+/**
+ * Servlet filter that captures context of the HTTP request to be used in the
+ * scope of KMS calls on the server side.
+ */
+@InterfaceAudience.Private
+public class KMSMDCFilter implements Filter {
+
+  private static class Data {
+    private Principal principal;
+    private String method;
+    private StringBuffer url;
+
+    private Data(Principal principal, String method, StringBuffer url) {
+      this.principal = principal;
+      this.method = method;
+      this.url = url;
+    }
+  }
+
+  private static ThreadLocal<Data> DATA_TL = new ThreadLocal<Data>();
+
+  public static Principal getPrincipal() {
+    return DATA_TL.get().principal;
+  }
+
+  public static String getMethod() {
+    return DATA_TL.get().method;
+  }
+
+  public static String getURL() {
+    return DATA_TL.get().url.toString();
+  }
+
+  @Override
+  public void init(FilterConfig config) throws ServletException {
+  }
+
+  @Override
+  public void doFilter(ServletRequest request, ServletResponse response,
+      FilterChain chain)
+      throws IOException, ServletException {
+    try {
+      DATA_TL.remove();
+      Principal principal = ((HttpServletRequest) request).getUserPrincipal();
+      String method = ((HttpServletRequest) request).getMethod();
+      StringBuffer requestURL = ((HttpServletRequest) request).getRequestURL();
+      String queryString = ((HttpServletRequest) request).getQueryString();
+      if (queryString != null) {
+        requestURL.append("?").append(queryString);
+      }
+      DATA_TL.set(new Data(principal, method, requestURL));
+      chain.doFilter(request, response);
+    } finally {
+      DATA_TL.remove();
+    }
+  }
+
+  @Override
+  public void destroy() {
+  }
+}

+ 80 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSServerJSONUtils.java

@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
+
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * JSON utility methods for the KMS.
+ */
+@InterfaceAudience.Private
+public class KMSServerJSONUtils {
+  @SuppressWarnings("unchecked")
+  public static Map toJSON(KeyProvider.KeyVersion keyVersion) {
+    Map json = new LinkedHashMap();
+    if (keyVersion != null) {
+      json.put(KMSRESTConstants.VERSION_NAME_FIELD,
+          keyVersion.getVersionName());
+      json.put(KMSRESTConstants.MATERIAL_FIELD, keyVersion.getMaterial());
+    }
+    return json;
+  }
+
+  @SuppressWarnings("unchecked")
+  public static List toJSON(List<KeyProvider.KeyVersion> keyVersions) {
+    List json = new ArrayList();
+    if (keyVersions != null) {
+      for (KeyProvider.KeyVersion version : keyVersions) {
+        json.add(toJSON(version));
+      }
+    }
+    return json;
+  }
+
+  @SuppressWarnings("unchecked")
+  public static Map toJSON(String keyName, KeyProvider.Metadata meta) {
+    Map json = new LinkedHashMap();
+    if (meta != null) {
+      json.put(KMSRESTConstants.NAME_FIELD, keyName);
+      json.put(KMSRESTConstants.CIPHER_FIELD, meta.getCipher());
+      json.put(KMSRESTConstants.LENGTH_FIELD, meta.getBitLength());
+      json.put(KMSRESTConstants.DESCRIPTION_FIELD, meta.getDescription());
+      json.put(KMSRESTConstants.CREATED_FIELD,
+          meta.getCreated().getTime());
+      json.put(KMSRESTConstants.VERSIONS_FIELD,
+          (long) meta.getVersions());
+    }
+    return json;
+  }
+
+  @SuppressWarnings("unchecked")
+  public static List toJSON(String[] keyNames, KeyProvider.Metadata[] metas) {
+    List json = new ArrayList();
+    for (int i = 0; i < keyNames.length; i++) {
+      json.add(toJSON(keyNames[i], metas[i]));
+    }
+    return json;
+  }
+}

+ 214 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java

@@ -0,0 +1,214 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import com.codahale.metrics.JmxReporter;
+import com.codahale.metrics.Meter;
+import com.codahale.metrics.MetricRegistry;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.util.VersionInfo;
+import org.apache.log4j.PropertyConfigurator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.bridge.SLF4JBridgeHandler;
+
+import javax.servlet.ServletContextEvent;
+import javax.servlet.ServletContextListener;
+import java.io.File;
+import java.net.URL;
+import java.util.List;
+
+@InterfaceAudience.Private
+public class KMSWebApp implements ServletContextListener {
+
+  private static final String LOG4J_PROPERTIES = "kms-log4j.properties";
+
+  private static final String METRICS_PREFIX = "hadoop.kms.";
+  private static final String ADMIN_CALLS_METER = METRICS_PREFIX +
+      "admin.calls.meter";
+  private static final String KEY_CALLS_METER = METRICS_PREFIX +
+      "key.calls.meter";
+  private static final String INVALID_CALLS_METER = METRICS_PREFIX +
+      "invalid.calls.meter";
+  private static final String UNAUTHORIZED_CALLS_METER = METRICS_PREFIX +
+      "unauthorized.calls.meter";
+  private static final String UNAUTHENTICATED_CALLS_METER = METRICS_PREFIX +
+      "unauthenticated.calls.meter";
+
+  private static Logger LOG;
+  private static MetricRegistry metricRegistry;
+
+  private JmxReporter jmxReporter;
+  private static Configuration kmsConf;
+  private static KMSACLs acls;
+  private static Meter adminCallsMeter;
+  private static Meter keyCallsMeter;
+  private static Meter unauthorizedCallsMeter;
+  private static Meter unauthenticatedCallsMeter;
+  private static Meter invalidCallsMeter;
+  private static KeyProvider keyProvider;
+
+  static {
+    SLF4JBridgeHandler.removeHandlersForRootLogger();
+    SLF4JBridgeHandler.install();
+  }
+
+  private void initLogging(String confDir) {
+    if (System.getProperty("log4j.configuration") == null) {
+      System.setProperty("log4j.defaultInitOverride", "true");
+      boolean fromClasspath = true;
+      File log4jConf = new File(confDir, LOG4J_PROPERTIES).getAbsoluteFile();
+      if (log4jConf.exists()) {
+        PropertyConfigurator.configureAndWatch(log4jConf.getPath(), 1000);
+        fromClasspath = false;
+      } else {
+        ClassLoader cl = Thread.currentThread().getContextClassLoader();
+        URL log4jUrl = cl.getResource(LOG4J_PROPERTIES);
+        if (log4jUrl != null) {
+          PropertyConfigurator.configure(log4jUrl);
+        }
+      }
+      LOG = LoggerFactory.getLogger(KMSWebApp.class);
+      LOG.debug("KMS log starting");
+      if (fromClasspath) {
+        LOG.warn("Log4j configuration file '{}' not found", LOG4J_PROPERTIES);
+        LOG.warn("Logging with INFO level to standard output");
+      }
+    } else {
+      LOG = LoggerFactory.getLogger(KMSWebApp.class);
+    }
+  }
+
+  @Override
+  public void contextInitialized(ServletContextEvent sce) {
+    try {
+      String confDir = System.getProperty(KMSConfiguration.KMS_CONFIG_DIR);
+      if (confDir == null) {
+        throw new RuntimeException("System property '" +
+            KMSConfiguration.KMS_CONFIG_DIR + "' not defined");
+      }
+      kmsConf = KMSConfiguration.getKMSConf();
+      initLogging(confDir);
+      LOG.info("-------------------------------------------------------------");
+      LOG.info("  Java runtime version : {}", System.getProperty(
+          "java.runtime.version"));
+      LOG.info("  KMS Hadoop Version: " + VersionInfo.getVersion());
+      LOG.info("-------------------------------------------------------------");
+
+      acls = new KMSACLs();
+      acls.startReloader();
+
+      metricRegistry = new MetricRegistry();
+      jmxReporter = JmxReporter.forRegistry(metricRegistry).build();
+      jmxReporter.start();
+      adminCallsMeter = metricRegistry.register(ADMIN_CALLS_METER, new Meter());
+      keyCallsMeter = metricRegistry.register(KEY_CALLS_METER, new Meter());
+      invalidCallsMeter = metricRegistry.register(INVALID_CALLS_METER,
+          new Meter());
+      unauthorizedCallsMeter = metricRegistry.register(UNAUTHORIZED_CALLS_METER,
+          new Meter());
+      unauthenticatedCallsMeter = metricRegistry.register(
+          UNAUTHENTICATED_CALLS_METER, new Meter());
+
+      // this is required for the JMXJsonServlet to work properly.
+      // the JMXJsonServlet is behind the authentication filter,
+      // thus the '*' ACL.
+      sce.getServletContext().setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE,
+          kmsConf);
+      sce.getServletContext().setAttribute(HttpServer2.ADMINS_ACL,
+          new AccessControlList(AccessControlList.WILDCARD_ACL_VALUE));
+
+      // initializing the KeyProvider
+
+      List<KeyProvider> providers = KeyProviderFactory.getProviders(kmsConf);
+      if (providers.isEmpty()) {
+        throw new IllegalStateException("No KeyProvider has been defined");
+      }
+      if (providers.size() > 1) {
+        LOG.warn("There is more than one KeyProvider configured '{}', using " +
+            "the first provider",
+            kmsConf.get(KeyProviderFactory.KEY_PROVIDER_PATH));
+      }
+      keyProvider = providers.get(0);
+      long timeOutMillis =
+          kmsConf.getLong(KMSConfiguration.KEY_CACHE_TIMEOUT_KEY,
+              KMSConfiguration.KEY_CACHE_TIMEOUT_DEFAULT);
+      keyProvider = new KMSCacheKeyProvider(keyProvider, timeOutMillis);
+
+      LOG.info("KMS Started");
+    } catch (Throwable ex) {
+      System.out.println();
+      System.out.println("ERROR: Hadoop KMS could not be started");
+      System.out.println();
+      System.out.println("REASON: " + ex.toString());
+      System.out.println();
+      System.out.println("Stacktrace:");
+      System.out.println("---------------------------------------------------");
+      ex.printStackTrace(System.out);
+      System.out.println("---------------------------------------------------");
+      System.out.println();
+      System.exit(1);
+    }
+  }
+
+  @Override
+  public void contextDestroyed(ServletContextEvent sce) {
+    acls.stopReloader();
+    jmxReporter.stop();
+    jmxReporter.close();
+    metricRegistry = null;
+    LOG.info("KMS Stopped");
+  }
+
+  public static Configuration getConfiguration() {
+    return new Configuration(kmsConf);
+  }
+
+  public static KMSACLs getACLs() {
+    return acls;
+  }
+
+  public static Meter getAdminCallsMeter() {
+    return adminCallsMeter;
+  }
+
+  public static Meter getKeyCallsMeter() {
+    return keyCallsMeter;
+  }
+
+  public static Meter getInvalidCallsMeter() {
+    return invalidCallsMeter;
+  }
+
+  public static Meter getUnauthorizedCallsMeter() {
+    return unauthorizedCallsMeter;
+  }
+
+  public static Meter getUnauthenticatedCallsMeter() {
+    return unauthenticatedCallsMeter;
+  }
+
+  public static KeyProvider getKeyProvider() {
+    return keyProvider;
+  }
+}

+ 181 - 0
hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh

@@ -0,0 +1,181 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+# resolve links - $0 may be a softlink
+PRG="${0}"
+
+while [ -h "${PRG}" ]; do
+  ls=`ls -ld "${PRG}"`
+  link=`expr "$ls" : '.*-> \(.*\)$'`
+  if expr "$link" : '/.*' > /dev/null; then
+    PRG="$link"
+  else
+    PRG=`dirname "${PRG}"`/"$link"
+  fi
+done
+
+BASEDIR=`dirname ${PRG}`
+BASEDIR=`cd ${BASEDIR}/..;pwd`
+
+
+function print() {
+  if [ "${KMS_SILENT}" != "true" ]; then
+    echo "$@"
+  fi
+}
+
+# if KMS_HOME is already set, warn that it will be ignored
+#
+if [ "${KMS_HOME}" != "" ]; then
+  echo "WARNING: current setting of KMS_HOME ignored"
+fi
+
+print
+
+# setting KMS_HOME to the installation dir, it cannot be changed
+#
+export KMS_HOME=${BASEDIR}
+kms_home=${KMS_HOME}
+print "Setting KMS_HOME:          ${KMS_HOME}"
+
+# if the installation has a env file, source it
+# this is for native packages installations
+#
+if [ -e "${KMS_HOME}/bin/kms-env.sh" ]; then
+  print "Sourcing:                    ${KMS_HOME}/bin/kms-env.sh"
+  source ${KMS_HOME}/bin/kms-env.sh
+  grep "^ *export " ${KMS_HOME}/bin/kms-env.sh | sed 's/ *export/  setting/'
+fi
+
+# verify that the sourced env file didn't change KMS_HOME
+# if so, warn and revert
+#
+if [ "${KMS_HOME}" != "${kms_home}" ]; then
+  print "WARN: KMS_HOME resetting to ''${KMS_HOME}'' ignored"
+  export KMS_HOME=${kms_home}
+  print "  using KMS_HOME:        ${KMS_HOME}"
+fi
+
+if [ "${KMS_CONFIG}" = "" ]; then
+  export KMS_CONFIG=${KMS_HOME}/etc/hadoop
+  print "Setting KMS_CONFIG:        ${KMS_CONFIG}"
+else
+  print "Using   KMS_CONFIG:        ${KMS_CONFIG}"
+fi
+kms_config=${KMS_CONFIG}
+
+# if the configuration dir has a env file, source it
+#
+if [ -e "${KMS_CONFIG}/kms-env.sh" ]; then
+  print "Sourcing:                    ${KMS_CONFIG}/kms-env.sh"
+  source ${KMS_CONFIG}/kms-env.sh
+  grep "^ *export " ${KMS_CONFIG}/kms-env.sh | sed 's/ *export/  setting/'
+fi
+
+# verify that the sourced env file didn't change KMS_HOME
+# if so, warn and revert
+#
+if [ "${KMS_HOME}" != "${kms_home}" ]; then
+  echo "WARN: KMS_HOME resetting to ''${KMS_HOME}'' ignored"
+  export KMS_HOME=${kms_home}
+fi
+
+# verify that the sourced env file didn't change KMS_CONFIG
+# if so, warn and revert
+#
+if [ "${KMS_CONFIG}" != "${kms_config}" ]; then
+  echo "WARN: KMS_CONFIG resetting to ''${KMS_CONFIG}'' ignored"
+  export KMS_CONFIG=${kms_config}
+fi
+
+if [ "${KMS_LOG}" = "" ]; then
+  export KMS_LOG=${KMS_HOME}/logs
+  print "Setting KMS_LOG:           ${KMS_LOG}"
+else
+  print "Using   KMS_LOG:           ${KMS_LOG}"
+fi
+
+if [ ! -d "${KMS_LOG}" ]; then
+  mkdir -p "${KMS_LOG}"
+fi
+
+if [ "${KMS_TEMP}" = "" ]; then
+  export KMS_TEMP=${KMS_HOME}/temp
+  print "Setting KMS_TEMP:           ${KMS_TEMP}"
+else
+  print "Using   KMS_TEMP:           ${KMS_TEMP}"
+fi
+
+if [ ! -d "${KMS_TEMP}" ]; then
+  mkdir -p "${KMS_TEMP}"
+fi
+
+if [ "${KMS_HTTP_PORT}" = "" ]; then
+  export KMS_HTTP_PORT=16000
+  print "Setting KMS_HTTP_PORT:     ${KMS_HTTP_PORT}"
+else
+  print "Using   KMS_HTTP_PORT:     ${KMS_HTTP_PORT}"
+fi
+
+if [ "${KMS_ADMIN_PORT}" = "" ]; then
+  export KMS_ADMIN_PORT=`expr $KMS_HTTP_PORT +  1`
+  print "Setting KMS_ADMIN_PORT:     ${KMS_ADMIN_PORT}"
+else
+  print "Using   KMS_ADMIN_PORT:     ${KMS_ADMIN_PORT}"
+fi
+
+if [ "${KMS_SSL_KEYSTORE_FILE}" = "" ]; then
+  export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
+  print "Setting KMS_SSL_KEYSTORE_FILE:     ${KMS_SSL_KEYSTORE_FILE}"
+else
+  print "Using   KMS_SSL_KEYSTORE_FILE:     ${KMS_SSL_KEYSTORE_FILE}"
+fi
+
+if [ "${KMS_SSL_KEYSTORE_PASS}" = "" ]; then
+  export KMS_SSL_KEYSTORE_PASS=password
+  print "Setting KMS_SSL_KEYSTORE_PASS:     ${KMS_SSL_KEYSTORE_PASS}"
+else
+  print "Using   KMS_SSL_KEYSTORE_PASS:     ${KMS_SSL_KEYSTORE_PASS}"
+fi
+
+if [ "${CATALINA_BASE}" = "" ]; then
+  export CATALINA_BASE=${KMS_HOME}/share/hadoop/kms/tomcat
+  print "Setting CATALINA_BASE:       ${CATALINA_BASE}"
+else
+  print "Using   CATALINA_BASE:       ${CATALINA_BASE}"
+fi
+
+if [ "${KMS_CATALINA_HOME}" = "" ]; then
+  export KMS_CATALINA_HOME=${CATALINA_BASE}
+  print "Setting KMS_CATALINA_HOME:       ${KMS_CATALINA_HOME}"
+else
+  print "Using   KMS_CATALINA_HOME:       ${KMS_CATALINA_HOME}"
+fi
+
+if [ "${CATALINA_OUT}" = "" ]; then
+  export CATALINA_OUT=${KMS_LOG}/kms-catalina.out
+  print "Setting CATALINA_OUT:        ${CATALINA_OUT}"
+else
+  print "Using   CATALINA_OUT:        ${CATALINA_OUT}"
+fi
+
+if [ "${CATALINA_PID}" = "" ]; then
+  export CATALINA_PID=/tmp/kms.pid
+  print "Setting CATALINA_PID:        ${CATALINA_PID}"
+else
+  print "Using   CATALINA_PID:        ${CATALINA_PID}"
+fi
+
+print

+ 60 - 0
hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh

@@ -0,0 +1,60 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+# resolve links - $0 may be a softlink
+PRG="${0}"
+
+while [ -h "${PRG}" ]; do
+  ls=`ls -ld "${PRG}"`
+  link=`expr "$ls" : '.*-> \(.*\)$'`
+  if expr "$link" : '/.*' > /dev/null; then
+    PRG="$link"
+  else
+    PRG=`dirname "${PRG}"`/"$link"
+  fi
+done
+
+BASEDIR=`dirname ${PRG}`
+BASEDIR=`cd ${BASEDIR}/..;pwd`
+
+KMS_SILENT=${KMS_SILENT:-true}
+
+source ${HADOOP_LIBEXEC_DIR:-${BASEDIR}/libexec}/kms-config.sh
+
+# The Java system property 'kms.http.port' is not used by the KMS itself;
+# it is used in Tomcat's server.xml configuration file.
+#
+print "Using   CATALINA_OPTS:       ${CATALINA_OPTS}"
+
+catalina_opts="-Dkms.home.dir=${KMS_HOME}";
+catalina_opts="${catalina_opts} -Dkms.config.dir=${KMS_CONFIG}";
+catalina_opts="${catalina_opts} -Dkms.log.dir=${KMS_LOG}";
+catalina_opts="${catalina_opts} -Dkms.temp.dir=${KMS_TEMP}";
+catalina_opts="${catalina_opts} -Dkms.admin.port=${KMS_ADMIN_PORT}";
+catalina_opts="${catalina_opts} -Dkms.http.port=${KMS_HTTP_PORT}";
+catalina_opts="${catalina_opts} -Dkms.ssl.keystore.file=${KMS_SSL_KEYSTORE_FILE}";
+catalina_opts="${catalina_opts} -Dkms.ssl.keystore.pass=${KMS_SSL_KEYSTORE_PASS}";
+
+print "Adding to CATALINA_OPTS:     ${catalina_opts}"
+
+export CATALINA_OPTS="${CATALINA_OPTS} ${catalina_opts}"
+
+# Due to a bug, the catalina.sh script does not use CATALINA_OPTS when stopping the server
+#
+if [ "${1}" = "stop" ]; then
+  export JAVA_OPTS=${CATALINA_OPTS}
+fi
+
+exec ${KMS_CATALINA_HOME}/bin/catalina.sh "$@"

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/proto-journal-web.xml → hadoop-common-project/hadoop-kms/src/main/tomcat/ROOT/WEB-INF/web.xml

@@ -4,14 +4,13 @@
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
 
-    http://www.apache.org/licenses/LICENSE-2.0
+  http://www.apache.org/licenses/LICENSE-2.0
 
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
+  limitations under the License.
 -->
 <web-app version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee">
-@journal.servlet.definitions@
 </web-app>

+ 16 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/proto-secondary-web.xml → hadoop-common-project/hadoop-kms/src/main/tomcat/ROOT/index.html

@@ -1,17 +1,27 @@
-<?xml version="1.0" encoding="UTF-8"?>
 <!--
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
 
-    http://www.apache.org/licenses/LICENSE-2.0
+  http://www.apache.org/licenses/LICENSE-2.0
 
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
+  limitations under the License.
+
+
 -->
-<web-app version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee">
-@secondary.servlet.definitions@
-</web-app>
+<html>
+<head>
+  <title>Hadoop KMS</title>
+</head>
+<body>
+<h1>Hadoop KMS</h1>
+<ul>
+  <li>KMS REST API end-point <b>/kms/v1/*</b></li>
+  <li><a href="/kms/jmx">KMS JMX JSON end-point</a></li>
+</ul>
+</body>
+</html>

+ 67 - 0
hadoop-common-project/hadoop-kms/src/main/tomcat/logging.properties

@@ -0,0 +1,67 @@
+#
+#  All Rights Reserved.
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+handlers = 1catalina.org.apache.juli.FileHandler, 2localhost.org.apache.juli.FileHandler, 3manager.org.apache.juli.FileHandler, 4host-manager.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler
+
+.handlers = 1catalina.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler
+
+############################################################
+# Handler specific properties.
+# Describes specific configuration info for Handlers.
+############################################################
+
+1catalina.org.apache.juli.FileHandler.level = FINE
+1catalina.org.apache.juli.FileHandler.directory = ${kms.log.dir}
+1catalina.org.apache.juli.FileHandler.prefix = kms-catalina.
+
+2localhost.org.apache.juli.FileHandler.level = FINE
+2localhost.org.apache.juli.FileHandler.directory = ${kms.log.dir}
+2localhost.org.apache.juli.FileHandler.prefix = kms-localhost.
+
+3manager.org.apache.juli.FileHandler.level = FINE
+3manager.org.apache.juli.FileHandler.directory = ${kms.log.dir}
+3manager.org.apache.juli.FileHandler.prefix = kms-manager.
+
+4host-manager.org.apache.juli.FileHandler.level = FINE
+4host-manager.org.apache.juli.FileHandler.directory = ${kms.log.dir}
+4host-manager.org.apache.juli.FileHandler.prefix = kms-host-manager.
+
+java.util.logging.ConsoleHandler.level = FINE
+java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter
+
+
+############################################################
+# Facility specific properties.
+# Provides extra control for each logger.
+############################################################
+
+org.apache.catalina.core.ContainerBase.[Catalina].[localhost].level = INFO
+org.apache.catalina.core.ContainerBase.[Catalina].[localhost].handlers = 2localhost.org.apache.juli.FileHandler
+
+org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].level = INFO
+org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].handlers = 3manager.org.apache.juli.FileHandler
+
+org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].level = INFO
+org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].handlers = 4host-manager.org.apache.juli.FileHandler
+
+# For example, set the com.xyz.foo logger to only log SEVERE
+# messages:
+#org.apache.catalina.startup.ContextConfig.level = FINE
+#org.apache.catalina.startup.HostConfig.level = FINE
+#org.apache.catalina.session.ManagerBase.level = FINE
+#org.apache.catalina.core.AprLifecycleListener.level=FINE

+ 153 - 0
hadoop-common-project/hadoop-kms/src/main/tomcat/server.xml

@@ -0,0 +1,153 @@
+<?xml version='1.0' encoding='utf-8'?>
+<!--
+
+   All Rights Reserved.
+
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!-- Note:  A "Server" is not itself a "Container", so you may not
+     define subcomponents such as "Valves" at this level.
+     Documentation at /docs/config/server.html
+ -->
+<Server port="${kms.admin.port}" shutdown="SHUTDOWN">
+
+  <!--APR library loader. Documentation at /docs/apr.html -->
+  <Listener className="org.apache.catalina.core.AprLifecycleListener"
+            SSLEngine="on"/>
+  <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
+  <Listener className="org.apache.catalina.core.JasperListener"/>
+  <!-- Prevent memory leaks due to use of particular java/javax APIs-->
+  <Listener
+    className="org.apache.catalina.core.JreMemoryLeakPreventionListener"/>
+  <!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html -->
+  <Listener className="org.apache.catalina.mbeans.ServerLifecycleListener"/>
+  <Listener
+    className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener"/>
+
+  <!-- Global JNDI resources
+       Documentation at /docs/jndi-resources-howto.html
+  -->
+  <GlobalNamingResources>
+    <!-- Editable user database that can also be used by
+         UserDatabaseRealm to authenticate users
+    -->
+    <Resource name="UserDatabase" auth="Container"
+              type="org.apache.catalina.UserDatabase"
+              description="User database that can be updated and saved"
+              factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
+              pathname="conf/tomcat-users.xml"/>
+  </GlobalNamingResources>
+
+  <!-- A "Service" is a collection of one or more "Connectors" that share
+       a single "Container" Note:  A "Service" is not itself a "Container",
+       so you may not define subcomponents such as "Valves" at this level.
+       Documentation at /docs/config/service.html
+   -->
+  <Service name="Catalina">
+
+    <!--The connectors can use a shared executor, you can define one or more named thread pools-->
+    <!--
+    <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
+        maxThreads="150" minSpareThreads="4"/>
+    -->
+
+
+    <!-- A "Connector" represents an endpoint by which requests are received
+         and responses are returned. Documentation at :
+         Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)
+         Java AJP  Connector: /docs/config/ajp.html
+         APR (HTTP/AJP) Connector: /docs/apr.html
+         Define a non-SSL HTTP/1.1 Connector on port ${kms.http.port}
+    -->
+    <Connector port="${kms.http.port}" protocol="HTTP/1.1"
+               connectionTimeout="20000"
+               redirectPort="8443"/>
+    <!-- A "Connector" using the shared thread pool-->
+    <!--
+    <Connector executor="tomcatThreadPool"
+               port="${kms.http.port}" protocol="HTTP/1.1"
+               connectionTimeout="20000"
+               redirectPort="8443" />
+    -->
+    <!-- Define a SSL HTTP/1.1 Connector on port 8443
+         This connector uses the JSSE configuration, when using APR, the
+         connector should be using the OpenSSL style configuration
+         described in the APR documentation -->
+    <!--
+    <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
+               maxThreads="150" scheme="https" secure="true"
+               clientAuth="false" sslProtocol="TLS" />
+    -->
+
+    <!-- Define an AJP 1.3 Connector on port 8009 -->
+
+
+    <!-- An Engine represents the entry point (within Catalina) that processes
+ every request.  The Engine implementation for Tomcat stand alone
+ analyzes the HTTP headers included with the request, and passes them
+ on to the appropriate Host (virtual host).
+ Documentation at /docs/config/engine.html -->
+
+    <!-- You should set jvmRoute to support load-balancing via AJP ie :
+    <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
+    -->
+    <Engine name="Catalina" defaultHost="localhost">
+
+      <!--For clustering, please take a look at documentation at:
+          /docs/cluster-howto.html  (simple how to)
+          /docs/config/cluster.html (reference documentation) -->
+      <!--
+      <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
+      -->
+
+      <!-- The request dumper valve dumps useful debugging information about
+           the request and response data received and sent by Tomcat.
+           Documentation at: /docs/config/valve.html -->
+      <!--
+      <Valve className="org.apache.catalina.valves.RequestDumperValve"/>
+      -->
+
+      <!-- This Realm uses the UserDatabase configured in the global JNDI
+           resources under the key "UserDatabase".  Any edits
+           that are performed against this UserDatabase are immediately
+           available for use by the Realm.  -->
+      <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
+             resourceName="UserDatabase"/>
+
+      <!-- Define the default virtual host
+           Note: XML Schema validation will not work with Xerces 2.2.
+       -->
+      <Host name="localhost" appBase="webapps"
+            unpackWARs="true" autoDeploy="true"
+            xmlValidation="false" xmlNamespaceAware="false">
+
+        <!-- SingleSignOn valve, share authentication between web applications
+             Documentation at: /docs/config/valve.html -->
+        <!--
+        <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
+        -->
+
+        <!-- Access log processes all example.
+             Documentation at: /docs/config/valve.html -->
+        <!--
+        <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
+               prefix="localhost_access_log." suffix=".txt" pattern="common" resolveHosts="false"/>
+        -->
+
+      </Host>
+    </Engine>
+  </Service>
+</Server>

+ 135 - 0
hadoop-common-project/hadoop-kms/src/main/tomcat/ssl-server.xml

@@ -0,0 +1,135 @@
+<?xml version='1.0' encoding='utf-8'?>
+<!--
+
+   All Rights Reserved.
+
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!-- Note:  A "Server" is not itself a "Container", so you may not
+     define subcomponents such as "Valves" at this level.
+     Documentation at /docs/config/server.html
+ -->
+<Server port="${kms.admin.port}" shutdown="SHUTDOWN">
+
+  <!--APR library loader. Documentation at /docs/apr.html -->
+  <Listener className="org.apache.catalina.core.AprLifecycleListener"
+            SSLEngine="on"/>
+  <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
+  <Listener className="org.apache.catalina.core.JasperListener"/>
+  <!-- Prevent memory leaks due to use of particular java/javax APIs-->
+  <Listener
+    className="org.apache.catalina.core.JreMemoryLeakPreventionListener"/>
+  <!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html -->
+  <Listener className="org.apache.catalina.mbeans.ServerLifecycleListener"/>
+  <Listener
+    className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener"/>
+
+  <!-- Global JNDI resources
+       Documentation at /docs/jndi-resources-howto.html
+  -->
+  <GlobalNamingResources>
+    <!-- Editable user database that can also be used by
+         UserDatabaseRealm to authenticate users
+    -->
+    <Resource name="UserDatabase" auth="Container"
+              type="org.apache.catalina.UserDatabase"
+              description="User database that can be updated and saved"
+              factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
+              pathname="conf/tomcat-users.xml"/>
+  </GlobalNamingResources>
+
+  <!-- A "Service" is a collection of one or more "Connectors" that share
+       a single "Container" Note:  A "Service" is not itself a "Container",
+       so you may not define subcomponents such as "Valves" at this level.
+       Documentation at /docs/config/service.html
+   -->
+  <Service name="Catalina">
+
+    <!--The connectors can use a shared executor, you can define one or more named thread pools-->
+    <!--
+    <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
+        maxThreads="150" minSpareThreads="4"/>
+    -->
+
+    <!-- Define a SSL HTTP/1.1 Connector on port 8443
+         This connector uses the JSSE configuration, when using APR, the
+         connector should be using the OpenSSL style configuration
+         described in the APR documentation -->
+    <Connector port="${kms.http.port}" protocol="HTTP/1.1" SSLEnabled="true"
+               maxThreads="150" scheme="https" secure="true"
+               clientAuth="false" sslProtocol="TLS"
+               keystoreFile="${kms.ssl.keystore.file}"
+               keystorePass="${kms.ssl.keystore.pass}"/>
+
+    <!-- Define an AJP 1.3 Connector on port 8009 -->
+
+
+    <!-- An Engine represents the entry point (within Catalina) that processes
+ every request.  The Engine implementation for Tomcat stand alone
+ analyzes the HTTP headers included with the request, and passes them
+ on to the appropriate Host (virtual host).
+ Documentation at /docs/config/engine.html -->
+
+    <!-- You should set jvmRoute to support load-balancing via AJP ie :
+    <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
+    -->
+    <Engine name="Catalina" defaultHost="localhost">
+
+      <!--For clustering, please take a look at documentation at:
+          /docs/cluster-howto.html  (simple how to)
+          /docs/config/cluster.html (reference documentation) -->
+      <!--
+      <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
+      -->
+
+      <!-- The request dumper valve dumps useful debugging information about
+           the request and response data received and sent by Tomcat.
+           Documentation at: /docs/config/valve.html -->
+      <!--
+      <Valve className="org.apache.catalina.valves.RequestDumperValve"/>
+      -->
+
+      <!-- This Realm uses the UserDatabase configured in the global JNDI
+           resources under the key "UserDatabase".  Any edits
+           that are performed against this UserDatabase are immediately
+           available for use by the Realm.  -->
+      <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
+             resourceName="UserDatabase"/>
+
+      <!-- Define the default virtual host
+           Note: XML Schema validation will not work with Xerces 2.2.
+       -->
+      <Host name="localhost" appBase="webapps"
+            unpackWARs="true" autoDeploy="true"
+            xmlValidation="false" xmlNamespaceAware="false">
+
+        <!-- SingleSignOn valve, share authentication between web applications
+             Documentation at: /docs/config/valve.html -->
+        <!--
+        <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
+        -->
+
+        <!-- Access log processes all example.
+             Documentation at: /docs/config/valve.html -->
+        <!--
+        <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
+               prefix="localhost_access_log." suffix=".txt" pattern="common" resolveHosts="false"/>
+        -->
+
+      </Host>
+    </Engine>
+  </Service>
+</Server>

+ 78 - 0
hadoop-common-project/hadoop-kms/src/main/webapp/WEB-INF/web.xml

@@ -0,0 +1,78 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<web-app version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee">
+
+  <listener>
+    <listener-class>org.apache.hadoop.crypto.key.kms.server.KMSWebApp</listener-class>
+  </listener>
+
+  <servlet>
+    <servlet-name>webservices-driver</servlet-name>
+    <servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
+    <init-param>
+      <param-name>com.sun.jersey.config.property.packages</param-name>
+      <param-value>org.apache.hadoop.crypto.key.kms.server</param-value>
+    </init-param>
+
+    <!-- Enables detailed Jersey request/response logging -->
+    <!--
+    <init-param>
+        <param-name>com.sun.jersey.spi.container.ContainerRequestFilters</param-name>
+        <param-value>com.sun.jersey.api.container.filter.LoggingFilter</param-value>
+    </init-param>
+    <init-param>
+        <param-name>com.sun.jersey.spi.container.ContainerResponseFilters</param-name>
+        <param-value>com.sun.jersey.api.container.filter.LoggingFilter</param-value>
+    </init-param>
+    -->
+    <load-on-startup>1</load-on-startup>
+  </servlet>
+
+  <servlet>
+    <servlet-name>jmx-servlet</servlet-name>
+    <servlet-class>org.apache.hadoop.jmx.JMXJsonServlet</servlet-class>
+  </servlet>
+
+  <servlet-mapping>
+    <servlet-name>webservices-driver</servlet-name>
+    <url-pattern>/*</url-pattern>
+  </servlet-mapping>
+
+  <servlet-mapping>
+    <servlet-name>jmx-servlet</servlet-name>
+    <url-pattern>/jmx</url-pattern>
+  </servlet-mapping>
+
+  <filter>
+    <filter-name>authFilter</filter-name>
+    <filter-class>org.apache.hadoop.crypto.key.kms.server.KMSAuthenticationFilter</filter-class>
+  </filter>
+
+  <filter>
+    <filter-name>MDCFilter</filter-name>
+    <filter-class>org.apache.hadoop.crypto.key.kms.server.KMSMDCFilter</filter-class>
+  </filter>
+
+  <filter-mapping>
+    <filter-name>authFilter</filter-name>
+    <url-pattern>/*</url-pattern>
+  </filter-mapping>
+
+  <filter-mapping>
+    <filter-name>MDCFilter</filter-name>
+    <url-pattern>/*</url-pattern>
+  </filter-mapping>
+
+</web-app>

+ 487 - 0
hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm

@@ -0,0 +1,487 @@
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+
+  ---
+  Hadoop KMS - Documentation Sets ${project.version}
+  ---
+  ---
+  ${maven.build.timestamp}
+
+Hadoop Key Management Server (KMS) - Documentation Sets ${project.version}
+
+  Hadoop KMS is a cryptographic key management server based on Hadoop's
+  <<KeyProvider>> API.
+
+  It provides client and server components which communicate over
+  HTTP using a REST API.
+
+  The client is a KeyProvider implementation that interacts with the KMS
+  using the KMS HTTP REST API.
+
+  The KMS and its client have built-in security: they support HTTP SPNEGO
+  Kerberos authentication and HTTPS secure transport.
+
+  The KMS is a Java web application that runs using a pre-configured Tomcat
+  bundled with the Hadoop distribution.
+
+* KMS Client Configuration
+
+  The KMS client <<<KeyProvider>>> uses the <<kms>> scheme, and the embedded
+  URL must be the URL of the KMS. For example, for a KMS running
+  on <<<http://localhost:16000/kms>>>, the KeyProvider URI is
+  <<<kms://http@localhost:16000/kms>>>. Similarly, for a KMS running on
+  <<<https://localhost:16000/kms>>>, the KeyProvider URI is
+  <<<kms://https@localhost:16000/kms>>>.
+
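+  As an illustration only (not part of the KMS distribution), a Java client
+  can resolve the KMS-backed provider through the generic
+  <<<KeyProviderFactory>>> API. In the sketch below, the KMS URL and the key
+  name <<<mykey>>> are assumptions:
+
++---+
+// Minimal sketch: resolving and using the KMS-backed KeyProvider on the client.
+// The KMS URL and the key name "mykey" are illustrative assumptions.
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
+
+public class KMSClientSketch {
+  public static void main(String[] args) throws Exception {
+    Configuration conf = new Configuration();
+    // Point the client at the KMS using the kms:// scheme described above.
+    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
+        "kms://http@localhost:16000/kms");
+    List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
+    KeyProvider provider = providers.get(0);
+    KeyProvider.KeyVersion current = provider.getCurrentKey("mykey");
+    System.out.println(current.getVersionName());
+  }
+}
++---+
+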
+* KMS
+
+** KMS Configuration
+
+  Configure the KMS backing KeyProvider properties
+  in the <<<etc/hadoop/kms-site.xml>>> configuration file:
+
++---+
+  <property>
+    <name>hadoop.security.key.provider.path</name>
+    <value>jceks://file@/${user.home}/kms.keystore</value>
+  </property>
+
+  <property>
+    <name>hadoop.security.keystore.java-keystore-provider.password-file</name>
+    <value>kms.keystore.password</value>
+  </property>
++---+
+
+  The password file is looked up in Hadoop's configuration directory via the
+  classpath.
+
+  NOTE: You need to restart the KMS for the configuration changes to take
+  effect.
+
+** KMS Cache
+
+  The KMS caches keys for a short period of time to avoid excessive hits to
+  the underlying key provider.
+
+  The cache is used only by the following two methods: <<<getCurrentKey()>>>
+  and <<<getKeyVersion()>>>.
+
+  For the <<<getCurrentKey()>>> method, cached entries are kept for a maximum
+  of 1000 milliseconds regardless of the number of times the key is accessed
+  (to avoid a stale key being considered current).
+
+  For the <<<getKeyVersion()>>> method, cached entries are kept with a default
+  inactivity timeout of 10000 milliseconds. This timeout is configurable via
+  the following property in the <<<etc/hadoop/kms-site.xml>>> configuration
+  file:
+
++---+
+  <property>
+    <name>hadoop.kms.cache.timeout.ms</name>
+    <value>10000</value>
+  </property>
++---+
+
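+  Only these two read paths go through the cache. A minimal illustration,
+  reusing the <<<provider>>> instance from the client configuration sketch
+  above (the key and version names are assumptions):
+
++---+
+// Both reads below are answered from the KMS server-side cache when warm.
+KeyProvider.KeyVersion current  = provider.getCurrentKey("mykey");
+KeyProvider.KeyVersion specific = provider.getKeyVersion("mykey@0");
++---+
+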
+** Start/Stop the KMS
+
+  To start/stop the KMS, use the KMS <<<sbin/kms.sh>>> script. For example:
+
++---+
+hadoop-${project.version} $ sbin/kms.sh start
++---+
+
+  NOTE: Invoking the script without any parameters lists all possible
+  parameters (start, stop, run, etc.). The <<<kms.sh>>> script is a wrapper
+  for Tomcat's <<<catalina.sh>>> script that sets the environment variables
+  and Java System properties required to run KMS.
+
+** Embedded Tomcat Configuration
+
+  To configure the embedded Tomcat, go to the <<<share/hadoop/kms/tomcat/conf>>> directory.
+
+  KMS pre-configures the HTTP and Admin ports in Tomcat's <<<server.xml>>> to
+  16000 and 16001.
+
+  Tomcat logs are also preconfigured to go to Hadoop's <<<logs/>>> directory.
+
+  The following environment variables (which can be set in KMS's
+  <<<etc/hadoop/kms-env.sh>>> script) can be used to alter those values:
+
+  * KMS_HTTP_PORT
+
+  * KMS_ADMIN_PORT
+
+  * KMS_LOG
+
+  NOTE: You need to restart the KMS for the configuration changes to take
+  effect.
+
+** KMS Security Configuration
+
+*** Enabling Kerberos HTTP SPNEGO Authentication
+
+  Configure the Kerberos <<<etc/krb5.conf>>> file with the information of your
+  KDC server.
+
+  Create a service principal and its keytab for the KMS; it must be an
+  <<<HTTP>>> service principal.
+
+  Configure KMS <<<etc/hadoop/kms-site.xml>>> with the correct security values,
+  for example:
+
++---+
+  <property>
+    <name>hadoop.kms.authentication.type</name>
+    <value>kerberos</value>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.keytab</name>
+    <value>${user.home}/kms.keytab</value>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.principal</name>
+    <value>HTTP/localhost</value>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.name.rules</name>
+    <value>DEFAULT</value>
+  </property>
++---+
+
+  NOTE: You need to restart the KMS for the configuration changes to take
+  effect.
+
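+  With Kerberos enabled, the client must authenticate before contacting the
+  KMS. The following is an illustrative sketch only; the principal, keytab
+  path and key name are assumptions, and it reuses the imports from the client
+  configuration sketch above:
+
++---+
+// Illustrative only: calling the KMS as a Kerberos-authenticated user.
+// Also requires org.apache.hadoop.security.UserGroupInformation and
+// java.security.PrivilegedExceptionAction in addition to the earlier imports.
+UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
+    "client@EXAMPLE.COM", "/etc/security/keytabs/client.keytab");
+KeyProvider.KeyVersion kv = ugi.doAs(
+    new PrivilegedExceptionAction<KeyProvider.KeyVersion>() {
+      @Override
+      public KeyProvider.KeyVersion run() throws Exception {
+        Configuration conf = new Configuration();
+        conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
+            "kms://http@localhost:16000/kms");
+        return KeyProviderFactory.getProviders(conf).get(0)
+            .getCurrentKey("mykey");
+      }
+    });
++---+
+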
+*** KMS over HTTPS (SSL)
+
+  To configure the KMS to work over HTTPS, the following two properties must
+  be set in the <<<etc/hadoop/kms-env.sh>>> script (shown with default values):
+
+    * KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
+
+    * KMS_SSL_KEYSTORE_PASS=password
+
+  In the KMS <<<tomcat/conf>>> directory, replace the <<<server.xml>>> file
+  with the provided <<<ssl-server.xml>>> file.
+
+  You need to create an SSL certificate for the KMS. As the
+  <<<kms>>> Unix user, use the Java <<<keytool>>> command to create the
+  SSL certificate:
+
++---+
+$ keytool -genkey -alias tomcat -keyalg RSA
++---+
+
+  You will be asked a series of questions in an interactive prompt.  It will
+  create the keystore file, which will be named <<.keystore>> and located in the
+  <<<kms>>> user's home directory.
+
+  The password you enter for "keystore password" must match the value of the
+  <<<KMS_SSL_KEYSTORE_PASS>>> environment variable set in the
+  <<<kms-env.sh>>> script in the configuration directory.
+
+  The answer to "What is your first and last name?" (i.e. "CN") must be the
+  hostname of the machine where the KMS will be running.
+
+  NOTE: You need to restart the KMS for the configuration changes to take
+  effect.
+
+*** KMS Access Control
+
+  The KMS ACL configuration is defined in the KMS <<<etc/hadoop/kms-acls.xml>>>
+  configuration file. This file is hot-reloaded when it changes.
+
+  The KMS supports fine-grained access control via a set of ACL
+  configuration properties:
+
++---+
+  <property>
+    <name>hadoop.kms.acl.CREATE</name>
+    <value>*</value>
+    <description>
+      ACL for create-key operations.
+      If the user is not in the GET ACL, the key material is not returned
+      as part of the response.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.DELETE</name>
+    <value>*</value>
+    <description>
+      ACL for delete-key operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.ROLLOVER</name>
+    <value>*</value>
+    <description>
+      ACL for rollover-key operations.
+      If the user is not in the GET ACL, the key material is not returned
+      as part of the response.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET</name>
+    <value>*</value>
+    <description>
+      ACL for get-key-version and get-current-key operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET_KEYS</name>
+    <value>*</value>
+    <description>
+      ACL for get-keys operation.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET_METADATA</name>
+    <value>*</value>
+    <description>
+      ACL for get-key-metadata and get-keys-metadata operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.SET_KEY_MATERIAL</name>
+    <value>*</value>
+    <description>
+        Complementary ACL for CREATE and ROLLOVER operations to allow the client
+        to provide the key material when creating or rolling a key.
+    </description>
+  </property>
++---+
+
+** KMS HTTP REST API
+
+*** Create a Key
+
+  <REQUEST:>
+
++---+
+POST http://HOST:PORT/kms/v1/keys
+Content-Type: application/json
+
+{
+  "name"        : "<key-name>",
+  "cipher"      : "<cipher>",
+  "length"      : <length>,        //int
+  "material"    : "<material>",    //base64
+  "description" : "<description>"
+}
++---+
+  
+  <RESPONSE:>
+  
++---+
+201 CREATED
+LOCATION: http://HOST:PORT/kms/v1/key/<key-name>
+Content-Type: application/json
+
+{
+  "name"        : "versionName",
+  "material"    : "<material>",    //base64, not present without GET ACL
+}
++---+
+
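+  The same operation is also available through the client <<<KeyProvider>>>
+  API. A minimal sketch, reusing <<<conf>>> and <<<provider>>> from the client
+  configuration sketch above (the key name, cipher and length are assumptions):
+
++---+
+// Illustrative only: creating a key through the KeyProvider API.
+KeyProvider.Options options = new KeyProvider.Options(conf)
+    .setCipher("AES/CTR/NoPadding")
+    .setBitLength(128);
+KeyProvider.KeyVersion kv = provider.createKey("mykey", options);
++---+
+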
+*** Rollover Key
+
+  <REQUEST:>
+
++---+
+POST http://HOST:PORT/kms/v1/key/<key-name>
+Content-Type: application/json
+
+{
+  "material"    : "<material>",
+}
++---+
+
+  <RESPONSE:>
+
++---+
+200 OK
+Content-Type: application/json
+
+{
+  "name"        : "versionName",
+  "material"    : "<material>",    //base64, not present without GET ACL
+}
++---+
+
+*** Delete Key
+
+  <REQUEST:>
+
++---+
+DELETE http://HOST:PORT/kms/v1/key/<key-name>
++---+
+
+  <RESPONSE:>
+
++---+
+200 OK
++---+
+
+*** Get Key Metadata
+
+  <REQUEST:>
+
++---+
+GET http://HOST:PORT/kms/v1/key/<key-name>/_metadata
++---+
+
+  <RESPONSE:>
+
++---+
+200 OK
+Content-Type: application/json
+
+{
+  "name"        : "<key-name>",
+  "cipher"      : "<cipher>",
+  "length"      : <length>,        //int
+  "description" : "<description>",
+  "created"     : <millis-epoc>,   //long
+  "versions"    : <versions>       //int
+}
++---+
+
+*** Get Current Key
+
+  <REQUEST:>
+
++---+
+GET http://HOST:PORT/kms/v1/key/<key-name>/_currentversion
++---+
+
+  <RESPONSE:>
+
++---+
+200 OK
+Content-Type: application/json
+
+{
+  "name"        : "versionName",
+  "material"    : "<material>",    //base64
+}
++---+
+
+*** Get Key Version
+
+  <REQUEST:>
+
++---+
+GET http://HOST:PORT/kms/v1/keyversion/<version-name>
++---+
+
+  <RESPONSE:>
+
++---+
+200 OK
+Content-Type: application/json
+
+{
+  "name"        : "versionName",
+  "material"    : "<material>",    //base64
+}
++---+
+
+*** Get Key Versions
+
+  <REQUEST:>
+
++---+
+GET http://HOST:PORT/kms/v1/key/<key-name>/_versions
++---+
+
+  <RESPONSE:>
+
++---+
+200 OK
+Content-Type: application/json
+
+[
+  {
+    "name"        : "versionName",
+    "material"    : "<material>",    //base64
+  },
+  {
+    "name"        : "versionName",
+    "material"    : "<material>",    //base64
+  },
+  ...
+]
++---+
+
+*** Get Key Names
+
+  <REQUEST:>
+
++---+
+GET http://HOST:PORT/kms/v1/keys/names
++---+
+
+  <RESPONSE:>
+
++---+
+200 OK
+Content-Type: application/json
+
+[
+  "<key-name>",
+  "<key-name>",
+  ...
+]
++---+
+
+*** Get Keys Metadata
+
+  <REQUEST:>
+
++---+
+GET http://HOST:PORT/kms/v1/keys/metadata?key=<key-name>&key=<key-name>,...
++---+
+
+  <RESPONSE:>
+
++---+
+200 OK
+Content-Type: application/json
+
+[
+  {
+    "name"        : "<key-name>",
+    "cipher"      : "<cipher>",
+    "length"      : <length>,        //int
+    "description" : "<description>",
+    "created"     : <millis-epoc>,   //long
+    "versions"    : <versions>       //int
+  },
+  {
+    "name"        : "<key-name>",
+    "cipher"      : "<cipher>",
+    "length"      : <length>,        //int
+    "description" : "<description>",
+    "created"     : <millis-epoc>,   //long
+    "versions"    : <versions>       //int
+  },
+  ...
+]
++---+
+
+  \[ {{{./index.html}Go Back}} \]

+ 29 - 0
hadoop-common-project/hadoop-kms/src/site/resources/css/site.css

@@ -0,0 +1,29 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#banner {
+  height: 93px;
+  background: none;
+}
+
+#bannerLeft img {
+  margin-left: 30px;
+  margin-top: 10px;
+}
+
+#bannerRight img {
+  margin: 17px;
+}

+ 17 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/proto-datanode-web.xml → hadoop-common-project/hadoop-kms/src/site/site.xml

@@ -4,14 +4,26 @@
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
 
-    http://www.apache.org/licenses/LICENSE-2.0
+  http://www.apache.org/licenses/LICENSE-2.0
 
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
+  limitations under the License.
 -->
-<web-app version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee">
-@datanode.servlet.definitions@
-</web-app>
+<project name="Hadoop KMS">
+
+    <skin>
+      <groupId>org.apache.maven.skins</groupId>
+      <artifactId>maven-stylus-skin</artifactId>
+      <version>1.2</version>
+    </skin>
+
+    <body>
+      <links>
+        <item name="Apache Hadoop" href="http://hadoop.apache.org/"/>
+      </links>
+    </body>
+
+</project>

+ 806 - 0
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java

@@ -0,0 +1,806 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mortbay.jetty.Connector;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.security.SslSocketConnector;
+import org.mortbay.jetty.webapp.WebAppContext;
+
+import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosPrincipal;
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.LoginContext;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.Writer;
+import java.net.InetAddress;
+import java.net.MalformedURLException;
+import java.net.ServerSocket;
+import java.net.URI;
+import java.net.URL;
+import java.security.Principal;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+
+public class TestKMS {
+
+  public static File getTestDir() throws Exception {
+    File file = new File("dummy");
+    file = file.getAbsoluteFile();
+    file = file.getParentFile();
+    file = new File(file, "target");
+    file = new File(file, UUID.randomUUID().toString());
+    if (!file.mkdirs()) {
+      throw new RuntimeException("Could not create test directory: " + file);
+    }
+    return file;
+  }
+
+  public static Server createJettyServer(String keyStore, String password) {
+    try {
+      boolean ssl = keyStore != null;
+      InetAddress localhost = InetAddress.getByName("localhost");
+      String host = "localhost";
+      ServerSocket ss = new ServerSocket(0, 50, localhost);
+      int port = ss.getLocalPort();
+      ss.close();
+      Server server = new Server(0);
+      if (!ssl) {
+        server.getConnectors()[0].setHost(host);
+        server.getConnectors()[0].setPort(port);
+      } else {
+        SslSocketConnector c = new SslSocketConnector();
+        c.setHost(host);
+        c.setPort(port);
+        c.setNeedClientAuth(false);
+        c.setKeystore(keyStore);
+        c.setKeystoreType("jks");
+        c.setKeyPassword(password);
+        server.setConnectors(new Connector[]{c});
+      }
+      return server;
+    } catch (Exception ex) {
+      throw new RuntimeException("Could not start embedded servlet container, "
+          + ex.getMessage(), ex);
+    }
+  }
+
+  public static URL getJettyURL(Server server) {
+    boolean ssl = server.getConnectors()[0].getClass()
+        == SslSocketConnector.class;
+    try {
+      String scheme = (ssl) ? "https" : "http";
+      return new URL(scheme + "://" +
+          server.getConnectors()[0].getHost() + ":" +
+          server.getConnectors()[0].getPort());
+    } catch (MalformedURLException ex) {
+      throw new RuntimeException("It should never happen, " + ex.getMessage(),
+          ex);
+    }
+  }
+
+  public static abstract class KMSCallable implements Callable<Void> {
+    private URL kmsUrl;
+
+    protected URL getKMSUrl() {
+      return kmsUrl;
+    }
+  }
+
+  protected void runServer(String keystore, String password, File confDir,
+      KMSCallable callable) throws Exception {
+    System.setProperty(KMSConfiguration.KMS_CONFIG_DIR,
+        confDir.getAbsolutePath());
+    System.setProperty("log4j.configuration", "log4j.properties");
+    Server jetty = createJettyServer(keystore, password);
+    try {
+      ClassLoader cl = Thread.currentThread().getContextClassLoader();
+      URL url = cl.getResource("webapp");
+      if (url == null) {
+        throw new RuntimeException(
+            "Could not find webapp/ dir in test classpath");
+      }
+      WebAppContext context = new WebAppContext(url.getPath(), "/kms");
+      jetty.addHandler(context);
+      jetty.start();
+      url = new URL(getJettyURL(jetty), "kms");
+      System.out.println("Test KMS running at: " + url);
+      callable.kmsUrl = url;
+      callable.call();
+    } finally {
+      if (jetty != null && jetty.isRunning()) {
+        try {
+          jetty.stop();
+        } catch (Exception ex) {
+          throw new RuntimeException("Could not stop embedded Jetty, " +
+              ex.getMessage(), ex);
+        }
+      }
+    }
+  }
+
+  protected Configuration createBaseKMSConf(File keyStoreDir) throws Exception {
+    Configuration conf = new Configuration(false);
+    conf.set("hadoop.security.key.provider.path",
+        "jceks://file@/" + keyStoreDir.getAbsolutePath() + "/kms.keystore");
+    conf.set("hadoop.kms.authentication.type", "simple");
+    return conf;
+  }
+
+  protected void writeConf(File confDir, Configuration conf) throws Exception {
+    Writer writer = new FileWriter(new File(confDir,
+        KMSConfiguration.KMS_SITE_XML));
+    conf.writeXml(writer);
+    writer.close();
+
+    writer = new FileWriter(new File(confDir, KMSConfiguration.KMS_ACLS_XML));
+    conf.writeXml(writer);
+    writer.close();
+
+    //create empty core-site.xml
+    writer = new FileWriter(new File(confDir, "core-site.xml"));
+    new Configuration(false).writeXml(writer);
+    writer.close();
+  }
+
+  protected URI createKMSUri(URL kmsUrl) throws Exception {
+    String str = kmsUrl.toString();
+    str = str.replaceFirst("://", "@");
+    return new URI("kms://" + str);
+  }
+
+
+  private static class KerberosConfiguration
+      extends javax.security.auth.login.Configuration {
+    private String principal;
+    private String keytab;
+    private boolean isInitiator;
+
+    private KerberosConfiguration(String principal, File keytab,
+        boolean client) {
+      this.principal = principal;
+      this.keytab = keytab.getAbsolutePath();
+      this.isInitiator = client;
+    }
+
+    public static javax.security.auth.login.Configuration createClientConfig(
+        String principal,
+        File keytab) {
+      return new KerberosConfiguration(principal, keytab, true);
+    }
+
+    private static String getKrb5LoginModuleName() {
+      return System.getProperty("java.vendor").contains("IBM")
+             ? "com.ibm.security.auth.module.Krb5LoginModule"
+             : "com.sun.security.auth.module.Krb5LoginModule";
+    }
+
+    @Override
+    public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
+      Map<String, String> options = new HashMap<String, String>();
+      options.put("keyTab", keytab);
+      options.put("principal", principal);
+      options.put("useKeyTab", "true");
+      options.put("storeKey", "true");
+      options.put("doNotPrompt", "true");
+      options.put("useTicketCache", "true");
+      options.put("renewTGT", "true");
+      options.put("refreshKrb5Config", "true");
+      options.put("isInitiator", Boolean.toString(isInitiator));
+      String ticketCache = System.getenv("KRB5CCNAME");
+      if (ticketCache != null) {
+        options.put("ticketCache", ticketCache);
+      }
+      options.put("debug", "true");
+
+      return new AppConfigurationEntry[]{
+          new AppConfigurationEntry(getKrb5LoginModuleName(),
+              AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+              options)};
+    }
+  }
+
+  private static MiniKdc kdc;
+  private static File keytab;
+
+  @BeforeClass
+  public static void setUpMiniKdc() throws Exception {
+    File kdcDir = getTestDir();
+    Properties kdcConf = MiniKdc.createConf();
+    kdc = new MiniKdc(kdcConf, kdcDir);
+    kdc.start();
+    keytab = new File(kdcDir, "keytab");
+    List<String> principals = new ArrayList<String>();
+    principals.add("HTTP/localhost");
+    principals.add("client");
+    principals.add("client/host");
+    for (KMSACLs.Type type : KMSACLs.Type.values()) {
+      principals.add(type.toString());
+    }
+    principals.add("CREATE_MATERIAL");
+    principals.add("ROLLOVER_MATERIAL");
+    kdc.createPrincipal(keytab,
+        principals.toArray(new String[principals.size()]));
+  }
+
+  @AfterClass
+  public static void tearDownMiniKdc() throws Exception {
+    if (kdc != null) {
+      kdc.stop();
+    }
+  }
+
+  private void doAs(String user, final PrivilegedExceptionAction<Void> action)
+      throws Exception {
+    Set<Principal> principals = new HashSet<Principal>();
+    principals.add(new KerberosPrincipal(user));
+
+    //client login
+    Subject subject = new Subject(false, principals,
+        new HashSet<Object>(), new HashSet<Object>());
+    LoginContext loginContext = new LoginContext("", subject, null,
+        KerberosConfiguration.createClientConfig(user, keytab));
+    try {
+      loginContext.login();
+      subject = loginContext.getSubject();
+      Subject.doAs(subject, action);
+    } finally {
+      loginContext.logout();
+    }
+  }
+
+  public void testStartStop(final boolean ssl, final boolean kerberos)
+      throws Exception {
+    File testDir = getTestDir();
+    Configuration conf = createBaseKMSConf(testDir);
+
+    final String keystore;
+    final String password;
+    if (ssl) {
+      String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestKMS.class);
+      KeyStoreTestUtil.setupSSLConfig(testDir.getAbsolutePath(), sslConfDir,
+          conf, false);
+      keystore = testDir.getAbsolutePath() + "/serverKS.jks";
+      password = "serverP";
+    } else {
+      keystore = null;
+      password = null;
+    }
+
+    if (kerberos) {
+      conf.set("hadoop.kms.authentication.type", "kerberos");
+      conf.set("hadoop.kms.authentication.kerberos.keytab",
+          keytab.getAbsolutePath());
+      conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
+      conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
+    }
+
+    writeConf(testDir, conf);
+
+    runServer(keystore, password, testDir, new KMSCallable() {
+      @Override
+      public Void call() throws Exception {
+        Configuration conf = new Configuration();
+        URL url = getKMSUrl();
+        Assert.assertEquals(keystore != null,
+            url.getProtocol().equals("https"));
+        URI uri = createKMSUri(getKMSUrl());
+        final KeyProvider kp = new KMSClientProvider(uri, conf);
+
+        if (kerberos) {
+          for (String user : new String[]{"client", "client/host"}) {
+            doAs(user, new PrivilegedExceptionAction<Void>() {
+              @Override
+              public Void run() throws Exception {
+                // getKeys() empty
+                Assert.assertTrue(kp.getKeys().isEmpty());
+                return null;
+              }
+            });
+          }
+        } else {
+          // getKeys() empty
+          Assert.assertTrue(kp.getKeys().isEmpty());
+        }
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void testStartStopHttpPseudo() throws Exception {
+    testStartStop(false, false);
+  }
+
+  @Test
+  public void testStartStopHttpsPseudo() throws Exception {
+    testStartStop(true, false);
+  }
+
+  @Test
+  public void testStartStopHttpKerberos() throws Exception {
+    testStartStop(false, true);
+  }
+
+  @Test
+  public void testStartStopHttpsKerberos() throws Exception {
+    testStartStop(true, true);
+  }
+
+  @Test
+  public void testKMSProvider() throws Exception {
+    File confDir = getTestDir();
+    Configuration conf = createBaseKMSConf(confDir);
+    writeConf(confDir, conf);
+
+    runServer(null, null, confDir, new KMSCallable() {
+      @Override
+      public Void call() throws Exception {
+        Date started = new Date();
+        Configuration conf = new Configuration();
+        URI uri = createKMSUri(getKMSUrl());
+        KeyProvider kp = new KMSClientProvider(uri, conf);
+
+        // getKeys() empty
+        Assert.assertTrue(kp.getKeys().isEmpty());
+
+        // getKeysMetadata() empty
+        Assert.assertEquals(0, kp.getKeysMetadata().length);
+
+        // createKey()
+        KeyProvider.Options options = new KeyProvider.Options(conf);
+        options.setCipher("AES/CTR/NoPadding");
+        options.setBitLength(128);
+        options.setDescription("l1");
+        KeyProvider.KeyVersion kv0 = kp.createKey("k1", options);
+        Assert.assertNotNull(kv0);
+        Assert.assertNotNull(kv0.getVersionName());
+        Assert.assertNotNull(kv0.getMaterial());
+
+        // getKeyVersion()
+        KeyProvider.KeyVersion kv1 = kp.getKeyVersion(kv0.getVersionName());
+        Assert.assertEquals(kv0.getVersionName(), kv1.getVersionName());
+        Assert.assertNotNull(kv1.getMaterial());
+
+        // getCurrent()
+        KeyProvider.KeyVersion cv1 = kp.getCurrentKey("k1");
+        Assert.assertEquals(kv0.getVersionName(), cv1.getVersionName());
+        Assert.assertNotNull(cv1.getMaterial());
+
+        // getKeyMetadata() 1 version
+        KeyProvider.Metadata m1 = kp.getMetadata("k1");
+        Assert.assertEquals("AES/CTR/NoPadding", m1.getCipher());
+        Assert.assertEquals("AES", m1.getAlgorithm());
+        Assert.assertEquals(128, m1.getBitLength());
+        Assert.assertEquals(1, m1.getVersions());
+        Assert.assertNotNull(m1.getCreated());
+        Assert.assertTrue(started.before(m1.getCreated()));
+
+        // getKeyVersions() 1 version
+        List<KeyProvider.KeyVersion> lkv1 = kp.getKeyVersions("k1");
+        Assert.assertEquals(1, lkv1.size());
+        Assert.assertEquals(kv0.getVersionName(), lkv1.get(0).getVersionName());
+        Assert.assertNotNull(kv1.getMaterial());
+
+        // rollNewVersion()
+        KeyProvider.KeyVersion kv2 = kp.rollNewVersion("k1");
+        Assert.assertNotSame(kv0.getVersionName(), kv2.getVersionName());
+        Assert.assertNotNull(kv2.getMaterial());
+
+        // getKeyVersion()
+        kv2 = kp.getKeyVersion(kv2.getVersionName());
+        boolean eq = true;
+        for (int i = 0; i < kv1.getMaterial().length; i++) {
+          eq = eq && kv1.getMaterial()[i] == kv2.getMaterial()[i];
+        }
+        Assert.assertFalse(eq);
+
+        // getCurrent()
+        KeyProvider.KeyVersion cv2 = kp.getCurrentKey("k1");
+        Assert.assertEquals(kv2.getVersionName(), cv2.getVersionName());
+        Assert.assertNotNull(cv2.getMaterial());
+        eq = true;
+        for (int i = 0; i < kv1.getMaterial().length; i++) {
+          eq = eq && cv2.getMaterial()[i] == kv2.getMaterial()[i];
+        }
+        Assert.assertTrue(eq);
+
+        // getKeyVersions() 2 versions
+        List<KeyProvider.KeyVersion> lkv2 = kp.getKeyVersions("k1");
+        Assert.assertEquals(2, lkv2.size());
+        Assert.assertEquals(kv1.getVersionName(), lkv2.get(0).getVersionName());
+        Assert.assertNotNull(lkv2.get(0).getMaterial());
+        Assert.assertEquals(kv2.getVersionName(), lkv2.get(1).getVersionName());
+        Assert.assertNotNull(lkv2.get(1).getMaterial());
+
+        // getKeyMetadata() 2 version
+        KeyProvider.Metadata m2 = kp.getMetadata("k1");
+        Assert.assertEquals("AES/CTR/NoPadding", m2.getCipher());
+        Assert.assertEquals("AES", m2.getAlgorithm());
+        Assert.assertEquals(128, m2.getBitLength());
+        Assert.assertEquals(2, m2.getVersions());
+        Assert.assertNotNull(m2.getCreated());
+        Assert.assertTrue(started.before(m2.getCreated()));
+
+        // getKeys() 1 key
+        List<String> ks1 = kp.getKeys();
+        Assert.assertEquals(1, ks1.size());
+        Assert.assertEquals("k1", ks1.get(0));
+
+        // getKeysMetadata() 1 key 2 versions
+        KeyProvider.Metadata[] kms1 = kp.getKeysMetadata("k1");
+        Assert.assertEquals(1, kms1.length);
+        Assert.assertEquals("AES/CTR/NoPadding", kms1[0].getCipher());
+        Assert.assertEquals("AES", kms1[0].getAlgorithm());
+        Assert.assertEquals(128, kms1[0].getBitLength());
+        Assert.assertEquals(2, kms1[0].getVersions());
+        Assert.assertNotNull(kms1[0].getCreated());
+        Assert.assertTrue(started.before(kms1[0].getCreated()));
+
+        // deleteKey()
+        kp.deleteKey("k1");
+
+        // getKey()
+        Assert.assertNull(kp.getKeyVersion("k1"));
+
+        // getKeyVersions()
+        Assert.assertNull(kp.getKeyVersions("k1"));
+
+        // getMetadata()
+        Assert.assertNull(kp.getMetadata("k1"));
+
+        // getKeys() empty
+        Assert.assertTrue(kp.getKeys().isEmpty());
+
+        // getKeysMetadata() empty
+        Assert.assertEquals(0, kp.getKeysMetadata().length);
+
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void testACLs() throws Exception {
+    final File testDir = getTestDir();
+    Configuration conf = createBaseKMSConf(testDir);
+    conf.set("hadoop.kms.authentication.type", "kerberos");
+    conf.set("hadoop.kms.authentication.kerberos.keytab",
+        keytab.getAbsolutePath());
+    conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
+    conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
+
+    for (KMSACLs.Type type : KMSACLs.Type.values()) {
+      conf.set(type.getConfigKey(), type.toString());
+    }
+    conf.set(KMSACLs.Type.CREATE.getConfigKey(),
+        KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
+
+    conf.set(KMSACLs.Type.ROLLOVER.getConfigKey(),
+        KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
+
+    writeConf(testDir, conf);
+
+    runServer(null, null, testDir, new KMSCallable() {
+      @Override
+      public Void call() throws Exception {
+        final Configuration conf = new Configuration();
+        conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
+        URI uri = createKMSUri(getKMSUrl());
+        final KeyProvider kp = new KMSClientProvider(uri, conf);
+
+        //nothing allowed
+        doAs("client", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              kp.createKey("k", new KeyProvider.Options(conf));
+              Assert.fail();
+            } catch (AuthorizationException ex) {
+              //NOP
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            try {
+              kp.createKey("k", new byte[8], new KeyProvider.Options(conf));
+              Assert.fail();
+            } catch (AuthorizationException ex) {
+              //NOP
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            try {
+              kp.rollNewVersion("k");
+              Assert.fail();
+            } catch (AuthorizationException ex) {
+              //NOP
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            try {
+              kp.rollNewVersion("k", new byte[8]);
+              Assert.fail();
+            } catch (AuthorizationException ex) {
+              //NOP
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            try {
+              kp.getKeys();
+              Assert.fail();
+            } catch (AuthorizationException ex) {
+              //NOP
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            try {
+              kp.getKeysMetadata("k");
+              Assert.fail();
+            } catch (AuthorizationException ex) {
+              //NOP
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            try {
+              kp.getKeyVersion(KMSClientProvider.buildVersionName("k", 0));
+              Assert.fail();
+            } catch (AuthorizationException ex) {
+              //NOP
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            try {
+              kp.getCurrentKey("k");
+              Assert.fail();
+            } catch (AuthorizationException ex) {
+              //NOP
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            try {
+              kp.getMetadata("k");
+              Assert.fail();
+            } catch (AuthorizationException ex) {
+              //NOP
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            try {
+              kp.getKeyVersions("k");
+              Assert.fail();
+            } catch (AuthorizationException ex) {
+              //NOP
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+
+            return null;
+          }
+        });
+
+        doAs("CREATE", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              KeyProvider.KeyVersion kv = kp.createKey("k0",
+                  new KeyProvider.Options(conf));
+              Assert.assertNull(kv.getMaterial());
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            return null;
+          }
+        });
+
+        doAs("DELETE", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              kp.deleteKey("k0");
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            return null;
+          }
+        });
+
+        doAs("SET_KEY_MATERIAL", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              KeyProvider.KeyVersion kv = kp.createKey("k1", new byte[8],
+                  new KeyProvider.Options(conf));
+              Assert.assertNull(kv.getMaterial());
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            return null;
+          }
+        });
+
+        doAs("ROLLOVER", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              KeyProvider.KeyVersion kv = kp.rollNewVersion("k1");
+              Assert.assertNull(kv.getMaterial());
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            return null;
+          }
+        });
+
+        doAs("SET_KEY_MATERIAL", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              KeyProvider.KeyVersion kv = kp.rollNewVersion("k1", new byte[8]);
+              Assert.assertNull(kv.getMaterial());
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            return null;
+          }
+        });
+
+        doAs("GET", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              kp.getKeyVersion("k1@0");
+              kp.getCurrentKey("k1");
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            return null;
+          }
+        });
+
+        doAs("GET_KEYS", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              kp.getKeys();
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            return null;
+          }
+        });
+
+        doAs("GET_METADATA", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              kp.getMetadata("k1");
+              kp.getKeysMetadata("k1");
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            return null;
+          }
+        });
+
+        // test ACL reloading
+        Thread.sleep(10); // to ensure the ACLs file modifiedTime is newer
+        conf.set(KMSACLs.Type.CREATE.getConfigKey(), "foo");
+        writeConf(testDir, conf);
+
+        KMSWebApp.getACLs().run(); // forcing a reload by hand.
+
+        // should not be able to create a key now
+        doAs("CREATE", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              KeyProvider.KeyVersion kv = kp.createKey("k2",
+                  new KeyProvider.Options(conf));
+              Assert.fail();
+            } catch (AuthorizationException ex) {
+              //NOP
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+
+            return null;
+          }
+        });
+
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void testServicePrincipalACLs() throws Exception {
+    File testDir = getTestDir();
+    Configuration conf = createBaseKMSConf(testDir);
+    conf.set("hadoop.kms.authentication.type", "kerberos");
+    conf.set("hadoop.kms.authentication.kerberos.keytab",
+        keytab.getAbsolutePath());
+    conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
+    conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
+    for (KMSACLs.Type type : KMSACLs.Type.values()) {
+      conf.set(type.getConfigKey(), " ");
+    }
+    conf.set(KMSACLs.Type.CREATE.getConfigKey(), "client");
+
+    writeConf(testDir, conf);
+
+    runServer(null, null, testDir, new KMSCallable() {
+      @Override
+      public Void call() throws Exception {
+        final Configuration conf = new Configuration();
+        conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
+        URI uri = createKMSUri(getKMSUrl());
+        final KeyProvider kp = new KMSClientProvider(uri, conf);
+
+        doAs("client", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              KeyProvider.KeyVersion kv = kp.createKey("ck0",
+                  new KeyProvider.Options(conf));
+              Assert.assertNull(kv.getMaterial());
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            return null;
+          }
+        });
+
+        doAs("client/host", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              KeyProvider.KeyVersion kv = kp.createKey("ck1",
+                  new KeyProvider.Options(conf));
+              Assert.assertNull(kv.getMaterial());
+            } catch (Exception ex) {
+              Assert.fail(ex.toString());
+            }
+            return null;
+          }
+        });
+        return null;
+      }
+    });
+  }
+
+}
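
The kms:// addressing exercised by createKMSUri() above is worth spelling out: the client URI is the KMS URL with "://" rewritten to "@" behind a kms scheme, e.g. http://localhost:16000/kms becomes kms://http@localhost:16000/kms. A minimal client-side sketch under that assumption (the host/port and key name are illustrative, not taken from the patch; only calls that appear in the test are used):

    import java.net.URI;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.kms.KMSClientProvider;

    public class KMSClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // http://localhost:16000/kms  ->  kms://http@localhost:16000/kms
        URI uri = new URI("kms://http@localhost:16000/kms");
        KeyProvider kp = new KMSClientProvider(uri, conf);

        // Create a 128-bit AES key, as testKMSProvider() does.
        KeyProvider.Options options = new KeyProvider.Options(conf);
        options.setCipher("AES/CTR/NoPadding");
        options.setBitLength(128);
        KeyProvider.KeyVersion kv = kp.createKey("demo-key", options);
        System.out.println("created " + kv.getVersionName());

        // List key names; subject to the KMSACLs checks exercised in testACLs().
        List<String> keys = kp.getKeys();
        System.out.println("keys: " + keys);
      }
    }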

+ 47 - 0
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSACLs.java

@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestKMSACLs {
+
+  @Test
+  public void testDefaults() {
+    KMSACLs acls = new KMSACLs(new Configuration(false));
+    for (KMSACLs.Type type : KMSACLs.Type.values()) {
+      Assert.assertTrue(acls.hasAccess(type, "foo"));
+    }
+  }
+
+  @Test
+  public void testCustom() {
+    Configuration conf = new Configuration(false);
+    for (KMSACLs.Type type : KMSACLs.Type.values()) {
+      conf.set(type.getConfigKey(), type.toString() + " ");
+    }
+    KMSACLs acls = new KMSACLs(conf);
+    for (KMSACLs.Type type : KMSACLs.Type.values()) {
+      Assert.assertTrue(acls.hasAccess(type, type.toString()));
+      Assert.assertFalse(acls.hasAccess(type, "foo"));
+    }
+  }
+
+}

+ 120 - 0
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSCacheKeyProvider.java

@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.util.Date;
+
+public class TestKMSCacheKeyProvider {
+
+  @Test
+  public void testCurrentKey() throws Exception {
+    KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
+    KeyProvider mockProv = Mockito.mock(KeyProvider.class);
+    Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
+    Mockito.when(mockProv.getCurrentKey(Mockito.eq("k2"))).thenReturn(null);
+    KeyProvider cache = new KMSCacheKeyProvider(mockProv, 100);
+
+    // asserting caching
+    Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
+    Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
+    Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
+    Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
+    Thread.sleep(1200);
+    Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
+    Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
+
+    // asserting no caching when key is not known
+    cache = new KMSCacheKeyProvider(mockProv, 100);
+    Assert.assertEquals(null, cache.getCurrentKey("k2"));
+    Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k2"));
+    Assert.assertEquals(null, cache.getCurrentKey("k2"));
+    Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k2"));
+  }
+
+  @Test
+  public void testKeyVersion() throws Exception {
+    KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
+    KeyProvider mockProv = Mockito.mock(KeyProvider.class);
+    Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0"))).thenReturn(mockKey);
+    Mockito.when(mockProv.getKeyVersion(Mockito.eq("k2@0"))).thenReturn(null);
+    KeyProvider cache = new KMSCacheKeyProvider(mockProv, 100);
+
+    // asserting caching
+    Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
+    Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));
+    Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
+    Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));
+    Thread.sleep(200);
+    Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
+    Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k1@0"));
+
+    // asserting no caching when key is not known
+    cache = new KMSCacheKeyProvider(mockProv, 100);
+    Assert.assertEquals(null, cache.getKeyVersion("k2@0"));
+    Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k2@0"));
+    Assert.assertEquals(null, cache.getKeyVersion("k2@0"));
+    Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k2@0"));
+  }
+
+  @Test
+  public void testRollNewVersion() throws Exception {
+    KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
+    KeyProvider mockProv = Mockito.mock(KeyProvider.class);
+    Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
+    KeyProvider cache = new KMSCacheKeyProvider(mockProv, 100);
+    Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
+    Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
+    cache.rollNewVersion("k1");
+
+    // asserting the cache is purged
+    Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
+    Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
+    cache.rollNewVersion("k1", new byte[0]);
+    Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
+    Mockito.verify(mockProv, Mockito.times(3)).getCurrentKey(Mockito.eq("k1"));
+  }
+
+  @Test
+  public void testDeleteKey() throws Exception {
+    KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
+    KeyProvider mockProv = Mockito.mock(KeyProvider.class);
+    Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
+    Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0"))).thenReturn(mockKey);
+    Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(
+        new KMSClientProvider.KMSMetadata("c", 0, "l", new Date(), 1));
+    KeyProvider cache = new KMSCacheKeyProvider(mockProv, 100);
+    Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
+    Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
+    Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
+    Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));
+    cache.deleteKey("k1");
+
+    // asserting the cache is purged
+    Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
+    Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
+    Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
+    Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k1@0"));
+  }
+
+}
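
The three tests above pin down the KMSCacheKeyProvider contract: a hit is served from memory until the configured timeout, an unknown key (a null from the underlying provider) is never cached, and rollNewVersion()/deleteKey() purge the cached entry. A self-contained sketch of that contract, not the KMS implementation (names and types are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    public class ExpiringCacheSketch<V> {

      /** Loads a value from the underlying provider; may return null. */
      public interface Loader<T> {
        T load(String name) throws Exception;
      }

      private static final class Entry<T> {
        final T value;
        final long loadedAt;
        Entry(T value, long loadedAt) {
          this.value = value;
          this.loadedAt = loadedAt;
        }
      }

      private final Map<String, Entry<V>> cache = new HashMap<String, Entry<V>>();
      private final long timeoutMillis;

      public ExpiringCacheSketch(long timeoutMillis) {
        this.timeoutMillis = timeoutMillis;
      }

      public synchronized V get(String name, Loader<V> loader) throws Exception {
        Entry<V> e = cache.get(name);
        long now = System.currentTimeMillis();
        if (e == null || now - e.loadedAt > timeoutMillis) {
          V v = loader.load(name);   // delegate to the underlying provider
          if (v == null) {
            return null;             // unknown keys are never cached
          }
          e = new Entry<V>(v, now);
          cache.put(name, e);
        }
        return e.value;
      }

      /** Mirrors the purge the tests expect after rollNewVersion()/deleteKey(). */
      public synchronized void purge(String name) {
        cache.remove(name);
      }
    }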

+ 31 - 0
hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties

@@ -0,0 +1,31 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# STDOUT Appender
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.Target=System.out
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} - %m%n
+
+log4j.rootLogger=WARN, stdout
+log4j.logger.org.apache.hadoop.conf=ERROR
+log4j.logger.org.apache.hadoop.crypto.key.kms.server=ALL
+log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
+log4j.logger.org.apache.hadoop.security=OFF
+log4j.logger.org.apache.directory.server.core=OFF
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=OFF

+ 25 - 12
hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java

@@ -393,18 +393,22 @@ public class MiniKdc {
     map.put("4", bindAddress);
 
     ClassLoader cl = Thread.currentThread().getContextClassLoader();
-    InputStream is = cl.getResourceAsStream("minikdc.ldiff");
+    InputStream is1 = cl.getResourceAsStream("minikdc.ldiff");
 
     SchemaManager schemaManager = ds.getSchemaManager();
-    final String content = StrSubstitutor.replace(IOUtils.toString(is), map);
-    LdifReader reader = new LdifReader(new StringReader(content));
+    LdifReader reader = null;
+
     try {
+      final String content = StrSubstitutor.replace(IOUtils.toString(is1), map);
+      reader = new LdifReader(new StringReader(content));
+
       for (LdifEntry ldifEntry : reader) {
         ds.getAdminSession().add(new DefaultEntry(schemaManager,
                 ldifEntry.getEntry()));
       }
     } finally {
-      reader.close();
+      IOUtils.closeQuietly(reader);
+      IOUtils.closeQuietly(is1);
     }
 
     kdc = new KdcServer();
@@ -429,14 +433,23 @@ public class MiniKdc {
     kdc.start();
 
     StringBuilder sb = new StringBuilder();
-    is = cl.getResourceAsStream("minikdc-krb5.conf");
-    BufferedReader r = new BufferedReader(new InputStreamReader(is));
-    String line = r.readLine();
-    while (line != null) {
-      sb.append(line).append("{3}");
-      line = r.readLine();
+    InputStream is2 = cl.getResourceAsStream("minikdc-krb5.conf");
+
+    BufferedReader r = null;
+
+    try {
+      r = new BufferedReader(new InputStreamReader(is2));
+      String line = r.readLine();
+
+      while (line != null) {
+        sb.append(line).append("{3}");
+        line = r.readLine();
+      }
+    } finally {
+      IOUtils.closeQuietly(r);
+      IOUtils.closeQuietly(is2);
     }
-    r.close();
+
     krb5conf = new File(workDir, "krb5.conf").getAbsoluteFile();
     FileUtils.writeStringToFile(krb5conf,
             MessageFormat.format(sb.toString(), getRealm(), getHost(),
@@ -555,4 +568,4 @@ public class MiniKdc {
     keytab.setEntries(entries);
     keytab.write(keytabFile);
   }
-}
+}
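
The change above moves the resource cleanup into finally blocks, so a failure while reading the LDIF or krb5 template can no longer leak the classpath streams. The same pattern in isolation, as a sketch that assumes commons-io (the IOUtils already used by MiniKdc) on the classpath:

    import java.io.BufferedReader;
    import java.io.InputStream;
    import java.io.InputStreamReader;

    import org.apache.commons.io.IOUtils;

    public class QuietCloseSketch {

      /** Reads a classpath resource fully, closing streams even on failure. */
      static String readResource(String name) throws Exception {
        InputStream is = Thread.currentThread().getContextClassLoader()
            .getResourceAsStream(name);
        BufferedReader r = null;
        StringBuilder sb = new StringBuilder();
        try {
          r = new BufferedReader(new InputStreamReader(is));
          for (String line = r.readLine(); line != null; line = r.readLine()) {
            sb.append(line).append('\n');
          }
        } finally {
          IOUtils.closeQuietly(r);   // null-safe, swallows close() exceptions
          IOUtils.closeQuietly(is);
        }
        return sb.toString();
      }
    }

On Java 7+ a try-with-resources block would be an alternative; the patch sticks to the commons-io helpers MiniKdc already depends on.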

+ 1 - 0
hadoop-common-project/pom.xml

@@ -37,6 +37,7 @@
     <module>hadoop-annotations</module>
     <module>hadoop-nfs</module>
     <module>hadoop-minikdc</module>
+    <module>hadoop-kms</module>
   </modules>
 
   <build>

+ 1 - 0
hadoop-dist/pom.xml

@@ -118,6 +118,7 @@
                       run cp -r $ROOT/hadoop-common-project/hadoop-nfs/target/hadoop-nfs-${project.version}/* .
                       run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version}/* .
                       run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${project.version}/* .
+                      run cp -r $ROOT/hadoop-common-project/hadoop-kms/target/hadoop-kms-${project.version}/* .
                       run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-nfs/target/hadoop-hdfs-nfs-${project.version}/* .
                       run cp -r $ROOT/hadoop-yarn-project/target/hadoop-yarn-project-${project.version}/* .
                       run cp -r $ROOT/hadoop-mapreduce-project/target/hadoop-mapreduce-${project.version}/* .

+ 0 - 48
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -107,26 +107,10 @@
           <groupId>commons-httpclient</groupId>
           <artifactId>commons-httpclient</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-compiler</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-runtime</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>javax.servlet</groupId>
           <artifactId>servlet-api</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>javax.servlet</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet.jsp</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>org.mortbay.jetty</groupId>
           <artifactId>jetty</artifactId>
@@ -135,10 +119,6 @@
           <groupId>org.mortbay.jetty</groupId>
           <artifactId>jetty-util</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jsp-api-2.1</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>org.mortbay.jetty</groupId>
           <artifactId>servlet-api-2.5</artifactId>
@@ -151,10 +131,6 @@
           <groupId>org.eclipse.jdt</groupId>
           <artifactId>core</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>commons-el</groupId>
-          <artifactId>commons-el</artifactId>
-        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
@@ -170,26 +146,10 @@
           <groupId>commons-httpclient</groupId>
           <artifactId>commons-httpclient</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-compiler</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-runtime</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>javax.servlet</groupId>
           <artifactId>servlet-api</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>javax.servlet</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet.jsp</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>org.mortbay.jetty</groupId>
           <artifactId>jetty</artifactId>
@@ -198,10 +158,6 @@
           <groupId>org.mortbay.jetty</groupId>
           <artifactId>jetty-util</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jsp-api-2.1</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>org.mortbay.jetty</groupId>
           <artifactId>servlet-api-2.5</artifactId>
@@ -214,10 +170,6 @@
           <groupId>org.eclipse.jdt</groupId>
           <artifactId>core</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>commons-el</groupId>
-          <artifactId>commons-el</artifactId>
-        </exclusion>
       </exclusions>
     </dependency>
     <dependency>

+ 0 - 10
hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml

@@ -134,11 +134,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>commons-daemon</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet.jsp</groupId>
-      <artifactId>jsp-api</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
@@ -179,11 +174,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>jackson-mapper-asl</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>tomcat</groupId>
-      <artifactId>jasper-runtime</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>xmlenc</groupId>
       <artifactId>xmlenc</artifactId>

+ 45 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -127,6 +127,8 @@ Trunk (Unreleased)
 
     HDFS-6246. Remove 'dfs.support.append' flag from trunk code. (umamahesh)
 
+    HDFS-6252. Phase out the old web UI in HDFS. (wheat9)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -265,6 +267,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6281. Provide option to use the NFS Gateway without having to use the
     Hadoop portmapper. (atm)
 
+    HDFS-5168. Add cross node dependency support to BlockPlacementPolicy.
+    (Nikola Vujic via szetszwo)
+
   IMPROVEMENTS
 
     HDFS-6007. Update documentation about short-circuit local reads (iwasakims
@@ -329,6 +334,20 @@ Release 2.5.0 - UNRELEASED
     HDFS-6210. Support GETACLSTATUS operation in WebImageViewer.
     (Akira Ajisaka via wheat9)
 
+    HDFS-6269. NameNode Audit Log should differentiate between webHDFS open and
+    HDFS open. (Eric Payne via jeagles)
+
+    HDFS-6304. Consolidate the logic of path resolution in FSDirectory.
+    (wheat9)
+
+    HDFS-6295. Add "decommissioning" state and node state filtering to
+    dfsadmin. (wang)
+
+    HDFS-6294. Use INode IDs to avoid conflicts when a file open for write is
+    renamed. (cmccabe)
+
+    HDFS-6328. Clean up dead code in FSDirectory. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
@@ -412,6 +431,21 @@ Release 2.5.0 - UNRELEASED
     HDFS-6218. Audit log should use true client IP for proxied webhdfs
     operations. (daryn via kihwal)
 
+    HDFS-6288. DFSInputStream Pread doesn't update ReadStatistics.
+    (Juan Yu via wang)
+
+    HDFS-6289. HA failover can fail if there are pending DN messages for DNs
+    which no longer exist. (atm)
+
+    HDFS-6337. Setfacl testcase is failing due to dash character in username
+    in TestAclCLI (umamahesh)
+
+    HDFS-5381. ExtendedBlock#hashCode should use both blockId and block pool ID.
+    (Benoy Antony via Colin Patrick McCabe)
+
+    HDFS-6240. WebImageViewer returns 404 if LISTSTATUS to an empty directory.
+    (Akira Ajisaka via wheat9)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -467,6 +501,17 @@ Release 2.4.1 - UNRELEASED
     HDFS-6245. datanode fails to start with a bad disk even when failed
     volumes is set. (Arpit Agarwal)
 
+    HDFS-2882. DN continues to start up, even if block pool fails to initialize
+    (vinayakumarb)
+
+    HDFS-6340. DN can't finalize upgrade. (Rahul Singhal via Arpit Agarwal)
+
+    HDFS-6329. WebHdfs does not work if HA is enabled on NN but logical URI is
+    not configured. (kihwal)
+
+    HDFS-6313. WebHdfs may use the wrong NN when configured for multiple HA NNs
+    (kihwal)
+
 Release 2.4.0 - 2014-04-07 
 
   INCOMPATIBLE CHANGES

+ 6 - 155
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -121,11 +121,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>commons-daemon</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>javax.servlet.jsp</groupId>
-      <artifactId>jsp-api</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
@@ -166,11 +161,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>jackson-mapper-asl</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>tomcat</groupId>
-      <artifactId>jasper-runtime</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>xmlenc</groupId>
       <artifactId>xmlenc</artifactId>
@@ -201,123 +191,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
           </properties>
         </configuration>
       </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo.jspc</groupId>
-        <artifactId>jspc-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>hdfs</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>compile</goal>
-            </goals>
-            <configuration>
-              <compile>false</compile>
-              <workingDirectory>${project.build.directory}/generated-sources/java</workingDirectory>
-              <webFragmentFile>${project.build.directory}/hdfs-jsp-servlet-definitions.xml</webFragmentFile>
-              <packageName>org.apache.hadoop.hdfs.server.namenode</packageName>
-              <sources>
-                <directory>${basedir}/src/main/webapps/hdfs</directory>
-                <includes>
-                  <include>*.jsp</include>
-                </includes>
-              </sources>
-            </configuration>
-          </execution>
-          <execution>
-            <id>secondary</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>compile</goal>
-            </goals>
-            <configuration>
-              <compile>false</compile>
-              <workingDirectory>${project.build.directory}/generated-sources/java</workingDirectory>
-              <webFragmentFile>${project.build.directory}/secondary-jsp-servlet-definitions.xml</webFragmentFile>
-              <packageName>org.apache.hadoop.hdfs.server.namenode</packageName>
-              <sources>
-                <directory>${basedir}/src/main/webapps/secondary</directory>
-                <includes>
-                  <include>*.jsp</include>
-                </includes>
-              </sources>
-            </configuration>
-          </execution>
-          <execution>
-            <id>journal</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>compile</goal>
-            </goals>
-            <configuration>
-              <compile>false</compile>
-              <workingDirectory>${project.build.directory}/generated-sources/java</workingDirectory>
-              <webFragmentFile>${project.build.directory}/journal-jsp-servlet-definitions.xml</webFragmentFile>
-              <packageName>org.apache.hadoop.hdfs.server.journalservice</packageName>
-              <sources>
-                <directory>${basedir}/src/main/webapps/journal</directory>
-                <includes>
-                  <include>*.jsp</include>
-                </includes>
-              </sources>
-            </configuration>
-          </execution>
-          <execution>
-            <id>datanode</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>compile</goal>
-            </goals>
-            <configuration>
-              <compile>false</compile>
-              <workingDirectory>${project.build.directory}/generated-sources/java</workingDirectory>
-              <webFragmentFile>${project.build.directory}/datanode-jsp-servlet-definitions.xml</webFragmentFile>
-              <packageName>org.apache.hadoop.hdfs.server.datanode</packageName>
-              <sources>
-                <directory>${basedir}/src/main/webapps/datanode</directory>
-                <includes>
-                  <include>*.jsp</include>
-                </includes>
-              </sources>
-            </configuration>
-          </execution>
-        </executions>
-        <dependencies>
-          <dependency>
-            <groupId>org.codehaus.mojo.jspc</groupId>
-            <artifactId>jspc-compiler-tomcat5</artifactId>
-            <version>2.0-alpha-3</version>
-          </dependency>
-          <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-log4j12</artifactId>
-            <version>1.4.1</version>
-          </dependency>
-          <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>jcl104-over-slf4j</artifactId>
-            <version>1.4.1</version>
-          </dependency>
-        </dependencies>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-jsp-generated-sources-directory</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>add-source</goal>
-            </goals>
-            <configuration>
-              <sources>
-                <source>${project.build.directory}/generated-sources/java</source>
-              </sources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-antrun-plugin</artifactId>
@@ -325,18 +198,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
           <skipTests>false</skipTests>
         </configuration>
         <executions>
-          <execution>
-            <id>create-jsp-generated-sources-directory</id>
-            <phase>initialize</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-                <mkdir dir="${project.build.directory}/generated-sources/java" />
-              </target>
-            </configuration>
-          </execution>
           <execution>
             <id>create-web-xmls</id>
             <phase>compile</phase>
@@ -345,32 +206,21 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             </goals>
             <configuration>
               <target>
-                <loadfile property="hdfs.servlet.definitions" srcFile="${project.build.directory}/hdfs-jsp-servlet-definitions.xml"/>
-                <loadfile property="secondary.servlet.definitions" srcFile="${project.build.directory}/secondary-jsp-servlet-definitions.xml"/>
-                <loadfile property="datanode.servlet.definitions" srcFile="${project.build.directory}/datanode-jsp-servlet-definitions.xml"/>
-                <loadfile property="journal.servlet.definitions" srcFile="${project.build.directory}/journal-jsp-servlet-definitions.xml"/>               
-                <echoproperties destfile="${project.build.directory}/webxml.properties">
-                  <propertyset>
-                    <propertyref regex=".*.servlet.definitions"/>
-                  </propertyset>
-                </echoproperties>
-                <filter filtersfile="${project.build.directory}/webxml.properties"/>
-                <copy file="${basedir}/src/main/webapps/proto-hdfs-web.xml"
+                <copy file="${basedir}/src/main/webapps/proto-web.xml"
                       tofile="${project.build.directory}/webapps/hdfs/WEB-INF/web.xml"
                       filtering="true"/>
-                <copy file="${basedir}/src/main/webapps/proto-secondary-web.xml"
+                <copy file="${basedir}/src/main/webapps/proto-web.xml"
                       tofile="${project.build.directory}/webapps/secondary/WEB-INF/web.xml"
                       filtering="true"/>
-                <copy file="${basedir}/src/main/webapps/proto-datanode-web.xml"
+                <copy file="${basedir}/src/main/webapps/proto-web.xml"
                       tofile="${project.build.directory}/webapps/datanode/WEB-INF/web.xml"
                       filtering="true"/>
-                <copy file="${basedir}/src/main/webapps/proto-journal-web.xml"
+                <copy file="${basedir}/src/main/webapps/proto-web.xml"
                       tofile="${project.build.directory}/webapps/journal/WEB-INF/web.xml"
                       filtering="true"/>
                 <copy toDir="${project.build.directory}/webapps">
                   <fileset dir="${basedir}/src/main/webapps">
-                    <exclude name="**/*.jsp"/>
-                    <exclude name="**/proto-*-web.xml"/>
+                    <exclude name="**/proto-web.xml"/>
                   </fileset>
                 </copy>
               </target>
@@ -391,6 +241,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                 <copy todir="${project.build.directory}/test-classes/webapps">
                   <fileset dir="${project.build.directory}/webapps">
                     <exclude name="proto-*-web.xml"/>
+                    <exclude name="**/proto-web.xml"/>
                   </fileset>
                 </copy>
               </target>

+ 20 - 20
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -504,8 +504,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
    * that are currently being written by this client.
    * Note that a file can only be written by a single client.
    */
-  private final Map<String, DFSOutputStream> filesBeingWritten
-      = new HashMap<String, DFSOutputStream>();
+  private final Map<Long, DFSOutputStream> filesBeingWritten
+      = new HashMap<Long, DFSOutputStream>();
 
   /**
    * Same as this(NameNode.getAddress(conf), conf);
@@ -734,14 +734,14 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
   }
 
   /** Get a lease and start automatic renewal */
-  private void beginFileLease(final String src, final DFSOutputStream out) 
+  private void beginFileLease(final long inodeId, final DFSOutputStream out)
       throws IOException {
-    getLeaseRenewer().put(src, out, this);
+    getLeaseRenewer().put(inodeId, out, this);
   }
 
   /** Stop renewal of lease for the file. */
-  void endFileLease(final String src) throws IOException {
-    getLeaseRenewer().closeFile(src, this);
+  void endFileLease(final long inodeId) throws IOException {
+    getLeaseRenewer().closeFile(inodeId, this);
   }
     
 
@@ -749,9 +749,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
    *  enforced to consistently update its local dfsclients array and 
    *  client's filesBeingWritten map.
    */
-  void putFileBeingWritten(final String src, final DFSOutputStream out) {
+  void putFileBeingWritten(final long inodeId, final DFSOutputStream out) {
     synchronized(filesBeingWritten) {
-      filesBeingWritten.put(src, out);
+      filesBeingWritten.put(inodeId, out);
       // update the last lease renewal time only when there was no
       // writes. once there is one write stream open, the lease renewer
       // thread keeps it updated well with in anyone's expiration time.
@@ -762,9 +762,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
   }
 
   /** Remove a file. Only called from LeaseRenewer. */
-  void removeFileBeingWritten(final String src) {
+  void removeFileBeingWritten(final long inodeId) {
     synchronized(filesBeingWritten) {
-      filesBeingWritten.remove(src);
+      filesBeingWritten.remove(inodeId);
       if (filesBeingWritten.isEmpty()) {
         lastLeaseRenewal = 0;
       }
@@ -849,14 +849,14 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
   /** Close/abort all files being written. */
   private void closeAllFilesBeingWritten(final boolean abort) {
     for(;;) {
-      final String src;
+      final long inodeId;
       final DFSOutputStream out;
       synchronized(filesBeingWritten) {
         if (filesBeingWritten.isEmpty()) {
           return;
         }
-        src = filesBeingWritten.keySet().iterator().next();
-        out = filesBeingWritten.remove(src);
+        inodeId = filesBeingWritten.keySet().iterator().next();
+        out = filesBeingWritten.remove(inodeId);
       }
       if (out != null) {
         try {
@@ -866,8 +866,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
             out.close();
           }
         } catch(IOException ie) {
-          LOG.error("Failed to " + (abort? "abort": "close") + " file " + src,
-              ie);
+          LOG.error("Failed to " + (abort? "abort": "close") +
+                  " inode " + inodeId, ie);
         }
       }
     }
@@ -1465,7 +1465,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
     final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
         src, masked, flag, createParent, replication, blockSize, progress,
         buffersize, dfsClientConf.createChecksum(checksumOpt), favoredNodeStrs);
-    beginFileLease(src, result);
+    beginFileLease(result.getFileId(), result);
     return result;
   }
   
@@ -1513,7 +1513,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
           flag, createParent, replication, blockSize, progress, buffersize,
           checksum);
     }
-    beginFileLease(src, result);
+    beginFileLease(result.getFileId(), result);
     return result;
   }
   
@@ -1601,7 +1601,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
           + src + " on client " + clientName);
     }
     final DFSOutputStream result = callAppend(stat, src, buffersize, progress);
-    beginFileLease(src, result);
+    beginFileLease(result.getFileId(), result);
     return result;
   }
 
@@ -2438,8 +2438,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
   }
 
   @VisibleForTesting
-  ExtendedBlock getPreviousBlock(String file) {
-    return filesBeingWritten.get(file).getBlock();
+  ExtendedBlock getPreviousBlock(long fileId) {
+    return filesBeingWritten.get(fileId).getBlock();
   }
   
   /**

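The DFSClient hunk above keys the files-being-written map (and, further down, the lease renewer) by inode ID rather than by path, so renaming an open file no longer breaks the lease bookkeeping. A minimal, self-contained sketch of the same idea, using a plain Closeable in place of DFSOutputStream (class and method names here are illustrative, not part of the patch):

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    // Sketch: track open output streams by inode ID instead of path, so a
    // concurrent rename of the file does not orphan the lease bookkeeping.
    class OpenFileTracker {
      private final Map<Long, Closeable> filesBeingWritten =
          new HashMap<Long, Closeable>();

      synchronized void put(long inodeId, Closeable out) {
        filesBeingWritten.put(inodeId, out);
      }

      synchronized void remove(long inodeId) {
        filesBeingWritten.remove(inodeId);
      }

      // Close everything that is still open, e.g. on client shutdown.
      void closeAll() {
        for (;;) {
          final long inodeId;
          final Closeable out;
          synchronized (this) {
            if (filesBeingWritten.isEmpty()) {
              return;
            }
            inodeId = filesBeingWritten.keySet().iterator().next();
            out = filesBeingWritten.remove(inodeId);
          }
          try {
            out.close();
          } catch (IOException e) {
            System.err.println("Failed to close inode " + inodeId + ": " + e);
          }
        }
      }
    }
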
+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -1038,6 +1038,8 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
             setConfiguration(dfsClient.getConfiguration()).
             build();
         int nread = reader.readAll(buf, offset, len);
+        updateReadStatistics(readStatistics, nread, reader);
+
         if (nread != len) {
           throw new IOException("truncated return from reader.read(): " +
                                 "excpected " + len + ", got " + nread);

+ 8 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -1012,7 +1012,7 @@ public class DFSOutputStream extends FSOutputSummer
       //get a new datanode
       final DatanodeInfo[] original = nodes;
       final LocatedBlock lb = dfsClient.namenode.getAdditionalDatanode(
-          src, block, nodes, storageIDs,
+          src, fileId, block, nodes, storageIDs,
           failed.toArray(new DatanodeInfo[failed.size()]),
           1, dfsClient.clientName);
       setPipeline(lb);
@@ -1268,7 +1268,8 @@ public class DFSOutputStream extends FSOutputSummer
 
         if (!success) {
           DFSClient.LOG.info("Abandoning " + block);
-          dfsClient.namenode.abandonBlock(block, src, dfsClient.clientName);
+          dfsClient.namenode.abandonBlock(block, fileId, src,
+              dfsClient.clientName);
           block = null;
           DFSClient.LOG.info("Excluding datanode " + nodes[errorIndex]);
           excludedNodes.put(nodes[errorIndex], nodes[errorIndex]);
@@ -1914,7 +1915,8 @@ public class DFSOutputStream extends FSOutputSummer
       // namenode.
       if (persistBlocks.getAndSet(false) || updateLength) {
         try {
-          dfsClient.namenode.fsync(src, dfsClient.clientName, lastBlockLength);
+          dfsClient.namenode.fsync(src, fileId,
+              dfsClient.clientName, lastBlockLength);
         } catch (IOException ioe) {
           DFSClient.LOG.warn("Unable to persist blocks in hflush for " + src, ioe);
           // If we got an error here, it might be because some other thread called
@@ -2035,7 +2037,7 @@ public class DFSOutputStream extends FSOutputSummer
     streamer.setLastException(new IOException("Lease timeout of "
         + (dfsClient.getHdfsTimeout()/1000) + " seconds expired."));
     closeThreads(true);
-    dfsClient.endFileLease(src);
+    dfsClient.endFileLease(fileId);
   }
 
   // shutdown datastreamer and responseprocessor threads.
@@ -2090,7 +2092,7 @@ public class DFSOutputStream extends FSOutputSummer
       ExtendedBlock lastBlock = streamer.getBlock();
       closeThreads(false);
       completeFile(lastBlock);
-      dfsClient.endFileLease(src);
+      dfsClient.endFileLease(fileId);
     } catch (ClosedChannelException e) {
     } finally {
       closed = true;
@@ -2184,7 +2186,7 @@ public class DFSOutputStream extends FSOutputSummer
   }
 
   @VisibleForTesting
-  long getFileId() {
+  public long getFileId() {
     return fileId;
   }
 }

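Since getFileId() is made public above (still @VisibleForTesting), a test can read the inode ID of an open stream and key its assertions on it. A hedged sketch; the getWrappedStream() cast is the usual test idiom and, like the assumption that the default filesystem is HDFS, lies outside this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSOutputStream;

    public class FileIdOfOpenStream {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at an HDFS cluster.
        FileSystem fs = FileSystem.get(conf);
        FSDataOutputStream out = fs.create(new Path("/tmp/fileid-demo"));
        try {
          // The wrapped stream is the DFSOutputStream created above;
          // getFileId() now being public lets tests read the inode ID.
          DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
          System.out.println("inode id = " + dfsOut.getFileId());
        } finally {
          out.close();
        }
      }
    }
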
+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
 
 /**
  * {@link PolicyProvider} for HDFS protocols.
@@ -64,7 +65,10 @@ public class HDFSPolicyProvider extends PolicyProvider {
         RefreshUserMappingsProtocol.class),
     new Service(
         CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_GET_USER_MAPPINGS,
-        GetUserMappingsProtocol.class)
+        GetUserMappingsProtocol.class),
+    new Service(
+        CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_CALLQUEUE,
+        RefreshCallQueueProtocol.class)
   };
   
   @Override

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java

@@ -281,7 +281,7 @@ class LeaseRenewer {
         && Time.now() - emptyTime > gracePeriod;
   }
 
-  synchronized void put(final String src, final DFSOutputStream out,
+  synchronized void put(final long inodeId, final DFSOutputStream out,
       final DFSClient dfsc) {
     if (dfsc.isClientRunning()) {
       if (!isRunning() || isRenewerExpired()) {
@@ -319,7 +319,7 @@ class LeaseRenewer {
         });
         daemon.start();
       }
-      dfsc.putFileBeingWritten(src, out);
+      dfsc.putFileBeingWritten(inodeId, out);
       emptyTime = Long.MAX_VALUE;
     }
   }
@@ -330,8 +330,8 @@ class LeaseRenewer {
   }
 
   /** Close a file. */
-  void closeFile(final String src, final DFSClient dfsc) {
-    dfsc.removeFileBeingWritten(src);
+  void closeFile(final long inodeId, final DFSClient dfsc) {
+    dfsc.removeFileBeingWritten(inodeId);
 
     synchronized(this) {
       if (dfsc.isFilesBeingWrittenEmpty()) {

+ 17 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -292,13 +292,20 @@ public interface ClientProtocol {
    * file.
    * Any partial writes to the block will be discarded.
    * 
+   * @param b         Block to abandon
+   * @param fileId    The id of the file where the block resides.  Older clients
+   *                    will pass GRANDFATHER_INODE_ID here.
+   * @param src       The path of the file where the block resides.
+   * @param holder    Lease holder.
+   *
    * @throws AccessControlException If access is denied
    * @throws FileNotFoundException file <code>src</code> is not found
    * @throws UnresolvedLinkException If <code>src</code> contains a symlink
    * @throws IOException If an I/O error occurred
    */
   @Idempotent
-  public void abandonBlock(ExtendedBlock b, String src, String holder)
+  public void abandonBlock(ExtendedBlock b, long fileId,
+      String src, String holder)
       throws AccessControlException, FileNotFoundException,
       UnresolvedLinkException, IOException;
 
@@ -346,6 +353,7 @@ public interface ClientProtocol {
    * Get a datanode for an existing pipeline.
    * 
    * @param src the file being written
+   * @param fileId the ID of the file being written
    * @param blk the block being written
    * @param existings the existing nodes in the pipeline
    * @param excludes the excluded nodes
@@ -361,8 +369,10 @@ public interface ClientProtocol {
    * @throws IOException If an I/O error occurred
    */
   @Idempotent
-  public LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk,
-      final DatanodeInfo[] existings, final String[] existingStorageIDs,
+  public LocatedBlock getAdditionalDatanode(final String src,
+      final long fileId, final ExtendedBlock blk,
+      final DatanodeInfo[] existings,
+      final String[] existingStorageIDs,
       final DatanodeInfo[] excludes,
       final int numAdditionalNodes, final String clientName
       ) throws AccessControlException, FileNotFoundException,
@@ -898,6 +908,8 @@ public interface ClientProtocol {
    * Write all metadata for this file into persistent storage.
    * The file must be currently open for writing.
    * @param src The string representation of the path
+   * @param inodeId The inode ID, or GRANDFATHER_INODE_ID if the client is
+   *                too old to support fsync with inode IDs.
    * @param client The string representation of the client
    * @param lastBlockLength The length of the last block (under construction) 
    *                        to be reported to NameNode 
@@ -907,7 +919,8 @@ public interface ClientProtocol {
    * @throws IOException If an I/O error occurred
    */
   @Idempotent
-  public void fsync(String src, String client, long lastBlockLength) 
+  public void fsync(String src, long inodeId, String client,
+                    long lastBlockLength)
       throws AccessControlException, FileNotFoundException, 
       UnresolvedLinkException, IOException;
 

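The Javadoc above spells out the compatibility rule: upgraded clients send the inode ID, older clients send GRANDFATHER_INODE_ID and the server falls back to the path. An illustrative stand-alone sketch of that dispatch (the sentinel value and the resolver interface are stand-ins, not the NameNode code):

    // Sketch of the compatibility convention: new clients send the inode ID,
    // old clients send a sentinel and the server resolves by path instead.
    class FsyncCompat {
      static final long GRANDFATHER_INODE_ID = 0;  // illustrative sentinel

      interface InodeResolver {
        long lookupByPath(String src);
      }

      static long resolveTarget(long inodeId, String src, InodeResolver resolver) {
        if (inodeId == GRANDFATHER_INODE_ID) {
          // Pre-upgrade client: only the path is meaningful.
          return resolver.lookupByPath(src);
        }
        // New client: trust the inode ID even if the path was renamed since.
        return inodeId;
      }
    }
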
+ 19 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -29,6 +29,8 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 
 import java.util.Date;
+import java.util.LinkedList;
+import java.util.List;
 
 import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
 
@@ -50,6 +52,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private int xceiverCount;
   private String location = NetworkTopology.DEFAULT_RACK;
   private String softwareVersion;
+  private List<String> dependentHostNames = new LinkedList<String>();
+  
   
   // Datanode administrative states
   public enum AdminStates {
@@ -274,6 +278,21 @@ public class DatanodeInfo extends DatanodeID implements Node {
   public synchronized void setNetworkLocation(String location) {
     this.location = NodeBase.normalize(location);
   }
+  
+  /** Add a hostname to a list of network dependencies */
+  public void addDependentHostName(String hostname) {
+    dependentHostNames.add(hostname);
+  }
+  
+  /** List of Network dependencies */
+  public List<String> getDependentHostNames() {
+    return dependentHostNames;
+  }
+  
+  /** Sets the network dependencies */
+  public void setDependentHostNames(List<String> dependencyList) {
+    dependentHostNames = dependencyList;
+  }
     
   /** A formatted string for reporting the status of the DataNode. */
   public String getDatanodeReport() {

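A small usage sketch for the new dependent-host accessors. The DatanodeID constructor arguments follow the same shape as the JspHelper code later in this commit; all values are dummies:

    import java.util.Arrays;
    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    public class DependentHostsDemo {
      public static void main(String[] args) {
        DatanodeInfo dn = new DatanodeInfo(
            new DatanodeID("10.0.0.1", "dn1.example.com", "uuid-1", 50010, 0, 0, 0));

        // A dependency is another host whose failure is correlated with this
        // one, e.g. another DN virtualized on the same physical machine.
        dn.addDependentHostName("dn1-peer.example.com");
        dn.setDependentHostNames(
            Arrays.asList("dn1-peer.example.com", "dn1-twin.example.com"));
        System.out.println(dn.getDependentHostNames());
      }
    }
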
+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java

@@ -112,7 +112,8 @@ public class ExtendedBlock {
   
   @Override // Object
   public int hashCode() {
-    return block.hashCode();
+    int result = 31 + poolId.hashCode();
+    return (31 * result + block.hashCode());
   }
   
   @Override // Object

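The new hashCode mixes the block pool ID into the hash, so blocks that share a numeric ID across federated pools stop landing in the same bucket. A stand-alone illustration of the same 31-multiplier combination (not the ExtendedBlock class itself):

    import java.util.Objects;

    // Two "blocks" with the same numeric id but different pool ids now hash
    // differently, because the pool id contributes to the combined hash.
    class PoolAndBlock {
      final String poolId;
      final long blockId;

      PoolAndBlock(String poolId, long blockId) {
        this.poolId = poolId;
        this.blockId = blockId;
      }

      @Override
      public int hashCode() {
        int result = 31 + poolId.hashCode();
        return 31 * result + (int) (blockId ^ (blockId >>> 32));
      }

      @Override
      public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof PoolAndBlock)) return false;
        PoolAndBlock other = (PoolAndBlock) o;
        return blockId == other.blockId && Objects.equals(poolId, other.poolId);
      }

      public static void main(String[] args) {
        PoolAndBlock a = new PoolAndBlock("BP-1", 1073741825L);
        PoolAndBlock b = new PoolAndBlock("BP-2", 1073741825L);
        System.out.println(a.hashCode() == b.hashCode());  // false: pool id counts
      }
    }
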
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java

@@ -104,7 +104,7 @@ public class HdfsConstants {
 
   // type of the datanode report
   public static enum DatanodeReportType {
-    ALL, LIVE, DEAD
+    ALL, LIVE, DEAD, DECOMMISSIONING
   }
 
   // An invalid transaction ID that will never be seen in a real namesystem.

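With DECOMMISSIONING added to the report type, a client can ask for only the nodes whose decommissioning is still in progress. A sketch assuming the DistributedFileSystem#getDataNodeStats(DatanodeReportType) overload available to 2.x clients and an HDFS default filesystem:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

    public class DecommissioningReport {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at an HDFS cluster.
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        DatanodeInfo[] decommissioning =
            dfs.getDataNodeStats(DatanodeReportType.DECOMMISSIONING);
        for (DatanodeInfo dn : decommissioning) {
          System.out.println(dn.getHostName() + " " + dn.getAdminState());
        }
      }
    }
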
+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -434,8 +434,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public AbandonBlockResponseProto abandonBlock(RpcController controller,
       AbandonBlockRequestProto req) throws ServiceException {
     try {
-      server.abandonBlock(PBHelper.convert(req.getB()), req.getSrc(),
-          req.getHolder());
+      server.abandonBlock(PBHelper.convert(req.getB()), req.getFileId(),
+          req.getSrc(), req.getHolder());
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -473,7 +473,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       List<String> existingStorageIDsList = req.getExistingStorageUuidsList();
       List<DatanodeInfoProto> excludesList = req.getExcludesList();
       LocatedBlock result = server.getAdditionalDatanode(req.getSrc(),
-          PBHelper.convert(req.getBlk()),
+          req.getFileId(), PBHelper.convert(req.getBlk()),
           PBHelper.convert(existingList.toArray(
               new DatanodeInfoProto[existingList.size()])),
           existingStorageIDsList.toArray(
@@ -831,7 +831,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public FsyncResponseProto fsync(RpcController controller,
       FsyncRequestProto req) throws ServiceException {
     try {
-      server.fsync(req.getSrc(), req.getClient(), req.getLastBlockLength());
+      server.fsync(req.getSrc(), req.getFileId(),
+          req.getClient(), req.getLastBlockLength());
       return VOID_FSYNC_RESPONSE;
     } catch (IOException e) {
       throw new ServiceException(e);

+ 12 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -334,11 +334,12 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public void abandonBlock(ExtendedBlock b, String src, String holder)
-      throws AccessControlException, FileNotFoundException,
-      UnresolvedLinkException, IOException {
+  public void abandonBlock(ExtendedBlock b, long fileId, String src,
+      String holder) throws AccessControlException, FileNotFoundException,
+        UnresolvedLinkException, IOException {
     AbandonBlockRequestProto req = AbandonBlockRequestProto.newBuilder()
-        .setB(PBHelper.convert(b)).setSrc(src).setHolder(holder).build();
+        .setB(PBHelper.convert(b)).setSrc(src).setHolder(holder)
+            .setFileId(fileId).build();
     try {
       rpcProxy.abandonBlock(null, req);
     } catch (ServiceException e) {
@@ -370,8 +371,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public LocatedBlock getAdditionalDatanode(String src, ExtendedBlock blk,
-      DatanodeInfo[] existings, String[] existingStorageIDs,
+  public LocatedBlock getAdditionalDatanode(String src, long fileId,
+      ExtendedBlock blk, DatanodeInfo[] existings, String[] existingStorageIDs,
       DatanodeInfo[] excludes,
       int numAdditionalNodes, String clientName) throws AccessControlException,
       FileNotFoundException, SafeModeException, UnresolvedLinkException,
@@ -379,6 +380,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     GetAdditionalDatanodeRequestProto req = GetAdditionalDatanodeRequestProto
         .newBuilder()
         .setSrc(src)
+        .setFileId(fileId)
         .setBlk(PBHelper.convert(blk))
         .addAllExistings(PBHelper.convert(existings))
         .addAllExistingStorageUuids(Arrays.asList(existingStorageIDs))
@@ -755,11 +757,13 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public void fsync(String src, String client, long lastBlockLength)
+  public void fsync(String src, long fileId, String client,
+                    long lastBlockLength)
       throws AccessControlException, FileNotFoundException,
       UnresolvedLinkException, IOException {
     FsyncRequestProto req = FsyncRequestProto.newBuilder().setSrc(src)
-        .setClient(client).setLastBlockLength(lastBlockLength).build();
+        .setClient(client).setLastBlockLength(lastBlockLength)
+            .setFileId(fileId).build();
     try {
       rpcProxy.fsync(null, req);
     } catch (ServiceException e) {

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -1422,6 +1422,7 @@ public class PBHelper {
     case ALL: return DatanodeReportTypeProto.ALL;
     case LIVE: return DatanodeReportTypeProto.LIVE;
     case DEAD: return DatanodeReportTypeProto.DEAD;
+    case DECOMMISSIONING: return DatanodeReportTypeProto.DECOMMISSIONING;
     default: 
       throw new IllegalArgumentException("Unexpected data type report:" + t);
     }
@@ -1433,6 +1434,7 @@ public class PBHelper {
     case ALL: return DatanodeReportType.ALL;
     case LIVE: return DatanodeReportType.LIVE;
     case DEAD: return DatanodeReportType.DEAD;
+    case DECOMMISSIONING: return DatanodeReportType.DECOMMISSIONING;
     default: 
       throw new IllegalArgumentException("Unexpected data type report:" + t);
     }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -842,7 +842,7 @@ public class Balancer {
    */
   private static void checkReplicationPolicyCompatibility(Configuration conf
       ) throws UnsupportedActionException {
-    if (!(BlockPlacementPolicy.getInstance(conf, null, null) instanceof 
+    if (!(BlockPlacementPolicy.getInstance(conf, null, null, null) instanceof 
         BlockPlacementPolicyDefault)) {
       throw new UnsupportedActionException(
           "Balancer without BlockPlacementPolicyDefault");

+ 9 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -267,7 +267,8 @@ public class BlockManager {
     blocksMap = new BlocksMap(
         LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
     blockplacement = BlockPlacementPolicy.getInstance(
-        conf, stats, datanodeManager.getNetworkTopology());
+        conf, stats, datanodeManager.getNetworkTopology(), 
+        datanodeManager.getHost2DatanodeMap());
     pendingReplications = new PendingReplicationBlocks(conf.getInt(
       DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
       DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L);
@@ -1007,6 +1008,8 @@ public class BlockManager {
     while(it.hasNext()) {
       removeStoredBlock(it.next(), node);
     }
+    // Remove all pending DN messages referencing this DN.
+    pendingDNMessages.removeAllMessagesForDatanode(node);
 
     node.resetBlocks();
     invalidateBlocks.remove(node.getDatanodeUuid());
@@ -1081,7 +1084,8 @@ public class BlockManager {
     DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
     if (node == null) {
       throw new IOException("Cannot mark " + b
-          + " as corrupt because datanode " + dn + " does not exist");
+          + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
+          + ") does not exist");
     }
 
     BlockCollection bc = b.corrupted.getBlockCollection();
@@ -1981,6 +1985,9 @@ public class BlockManager {
         // If the block is an out-of-date generation stamp or state,
         // but we're the standby, we shouldn't treat it as corrupt,
         // but instead just queue it for later processing.
+        // TODO: Pretty confident this should be s/storedBlock/block below,
+        // since we should be postponing the info of the reported block, not
+        // the stored block. See HDFS-6289 for more context.
         queueReportedBlock(dn, storageID, storedBlock, reportedState,
             QUEUE_REASON_CORRUPT_STATE);
       } else {

+ 5 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java

@@ -139,7 +139,8 @@ public abstract class BlockPlacementPolicy {
    * @param clusterMap cluster topology
    */
   abstract protected void initialize(Configuration conf,  FSClusterStats stats, 
-                                     NetworkTopology clusterMap);
+                                     NetworkTopology clusterMap, 
+                                     Host2NodesMap host2datanodeMap);
     
   /**
    * Get an instance of the configured Block Placement Policy based on the
@@ -153,14 +154,15 @@ public abstract class BlockPlacementPolicy {
    */
   public static BlockPlacementPolicy getInstance(Configuration conf, 
                                                  FSClusterStats stats,
-                                                 NetworkTopology clusterMap) {
+                                                 NetworkTopology clusterMap,
+                                                 Host2NodesMap host2datanodeMap) {
     final Class<? extends BlockPlacementPolicy> replicatorClass = conf.getClass(
         DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
         DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_DEFAULT,
         BlockPlacementPolicy.class);
     final BlockPlacementPolicy replicator = ReflectionUtils.newInstance(
         replicatorClass, conf);
-    replicator.initialize(conf, stats, clusterMap);
+    replicator.initialize(conf, stats, clusterMap, host2datanodeMap);
     return replicator;
   }
   

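Because initialize() now receives a Host2NodesMap, any custom policy plugged in through DFS_BLOCK_REPLICATOR_CLASSNAME_KEY has to adopt the four-argument signature. A hedged skeleton; it sits in the blockmanagement package because Host2NodesMap is package-private, and the body is intentionally a pass-through:

    package org.apache.hadoop.hdfs.server.blockmanagement;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.NetworkTopology;

    // Skeleton of a custom placement policy updated to the new signature.
    public class MyPlacementPolicy extends BlockPlacementPolicyDefault {
      @Override
      public void initialize(Configuration conf, FSClusterStats stats,
                             NetworkTopology clusterMap,
                             Host2NodesMap host2datanodeMap) {
        super.initialize(conf, stats, clusterMap, host2datanodeMap);
        // host2datanodeMap (also stored by the parent) lets the policy map
        // hostnames, e.g. dependent hosts, back to DatanodeDescriptors.
      }
    }
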
+ 7 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -70,6 +70,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   protected boolean considerLoad; 
   private boolean preferLocalNode = true;
   protected NetworkTopology clusterMap;
+  protected Host2NodesMap host2datanodeMap;
   private FSClusterStats stats;
   protected long heartbeatInterval;   // interval for DataNode heartbeats
   private long staleInterval;   // interval used to identify stale DataNodes
@@ -80,8 +81,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   protected int tolerateHeartbeatMultiplier;
 
   protected BlockPlacementPolicyDefault(Configuration conf, FSClusterStats stats,
-                           NetworkTopology clusterMap) {
-    initialize(conf, stats, clusterMap);
+                           NetworkTopology clusterMap, 
+                           Host2NodesMap host2datanodeMap) {
+    initialize(conf, stats, clusterMap, host2datanodeMap);
   }
 
   protected BlockPlacementPolicyDefault() {
@@ -89,11 +91,13 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     
   @Override
   public void initialize(Configuration conf,  FSClusterStats stats,
-                         NetworkTopology clusterMap) {
+                         NetworkTopology clusterMap, 
+                         Host2NodesMap host2datanodeMap) {
     this.considerLoad = conf.getBoolean(
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
     this.stats = stats;
     this.clusterMap = clusterMap;
+    this.host2datanodeMap = host2datanodeMap;
     this.heartbeatInterval = conf.getLong(
         DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
         DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000;

+ 35 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java

@@ -47,8 +47,8 @@ import org.apache.hadoop.net.NodeBase;
 public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefault {
 
   protected BlockPlacementPolicyWithNodeGroup(Configuration conf,  FSClusterStats stats,
-      NetworkTopology clusterMap) {
-    initialize(conf, stats, clusterMap);
+      NetworkTopology clusterMap, DatanodeManager datanodeManager) {
+    initialize(conf, stats, clusterMap, host2datanodeMap);
   }
 
   protected BlockPlacementPolicyWithNodeGroup() {
@@ -56,8 +56,9 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
 
   @Override
   public void initialize(Configuration conf,  FSClusterStats stats,
-          NetworkTopology clusterMap) {
-    super.initialize(conf, stats, clusterMap);
+          NetworkTopology clusterMap, 
+          Host2NodesMap host2datanodeMap) {
+    super.initialize(conf, stats, clusterMap, host2datanodeMap);
   }
 
   /** choose local node of localMachine as the target.
@@ -241,6 +242,36 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
         countOfExcludedNodes++;
       }
     }
+    
+    countOfExcludedNodes += addDependentNodesToExcludedNodes(
+        chosenNode, excludedNodes);
+    return countOfExcludedNodes;
+  }
+  
+  /**
+   * Add all nodes from a dependent nodes list to excludedNodes.
+   * @return number of new excluded nodes
+   */
+  private int addDependentNodesToExcludedNodes(DatanodeDescriptor chosenNode,
+      Set<Node> excludedNodes) {
+    if (this.host2datanodeMap == null) {
+      return 0;
+    }
+    int countOfExcludedNodes = 0;
+    for(String hostname : chosenNode.getDependentHostNames()) {
+      DatanodeDescriptor node =
+          this.host2datanodeMap.getDataNodeByHostName(hostname);
+      if(node!=null) {
+        if (excludedNodes.add(node)) {
+          countOfExcludedNodes++;
+        }
+      } else {
+        LOG.warn("Not able to find datanode " + hostname
+            + " which has dependency with datanode "
+            + chosenNode.getHostName());
+      }
+    }
+    
     return countOfExcludedNodes;
   }
 

+ 72 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -373,6 +373,11 @@ public class DatanodeManager {
     return host2DatanodeMap.getDatanodeByXferAddr(host, xferPort);
   }
 
+  /** @return the Host2NodesMap */
+  public Host2NodesMap getHost2DatanodeMap() {
+    return this.host2DatanodeMap;
+  }
+
   /**
    * Given datanode address or host name, returns the DatanodeDescriptor for the
    * same, or if it doesn't find the datanode, it looks for a machine local and
@@ -677,6 +682,52 @@ public class DatanodeManager {
     return networkLocation;
   }
 
+  /**
+   * Resolve a node's dependencies in the network. If the DNS to switch 
+   * mapping fails then this method returns empty list of dependencies 
+   * @param node to get dependencies for
+   * @return List of dependent host names
+   */
+  private List<String> getNetworkDependenciesWithDefault(DatanodeInfo node) {
+    List<String> dependencies;
+    try {
+      dependencies = getNetworkDependencies(node);
+    } catch (UnresolvedTopologyException e) {
+      LOG.error("Unresolved dependency mapping for host " + 
+          node.getHostName() +". Continuing with an empty dependency list");
+      dependencies = Collections.emptyList();
+    }
+    return dependencies;
+  }
+  
+  /**
+   * Resolves a node's dependencies in the network. If the DNS to switch 
+   * mapping fails to get dependencies, then this method throws 
+   * UnresolvedTopologyException. 
+   * @param node to get dependencies for
+   * @return List of dependent host names 
+   * @throws UnresolvedTopologyException if the DNS to switch mapping fails
+   */
+  private List<String> getNetworkDependencies(DatanodeInfo node)
+      throws UnresolvedTopologyException {
+    List<String> dependencies = Collections.emptyList();
+
+    if (dnsToSwitchMapping instanceof DNSToSwitchMappingWithDependency) {
+      //Get dependencies
+      dependencies = 
+          ((DNSToSwitchMappingWithDependency)dnsToSwitchMapping).getDependency(
+              node.getHostName());
+      if(dependencies == null) {
+        LOG.error("The dependency call returned null for host " + 
+            node.getHostName());
+        throw new UnresolvedTopologyException("The dependency call returned " + 
+            "null for host " + node.getHostName());
+      }
+    }
+
+    return dependencies;
+  }
+
   /**
    * Remove an already decommissioned data node who is neither in include nor
    * exclude hosts lists from the the list of live or dead nodes.  This is used
@@ -869,12 +920,14 @@ public class DatanodeManager {
           nodeS.setDisallowed(false); // Node is in the include list
 
           // resolve network location
-          if(this.rejectUnresolvedTopologyDN)
-          {
-            nodeS.setNetworkLocation(resolveNetworkLocation(nodeS));  
+          if(this.rejectUnresolvedTopologyDN) {
+            nodeS.setNetworkLocation(resolveNetworkLocation(nodeS));
+            nodeS.setDependentHostNames(getNetworkDependencies(nodeS));
           } else {
             nodeS.setNetworkLocation(
                 resolveNetworkLocationWithFallBackToDefaultLocation(nodeS));
+            nodeS.setDependentHostNames(
+                getNetworkDependenciesWithDefault(nodeS));
           }
           getNetworkTopology().add(nodeS);
             
@@ -900,9 +953,12 @@ public class DatanodeManager {
         // resolve network location
         if(this.rejectUnresolvedTopologyDN) {
           nodeDescr.setNetworkLocation(resolveNetworkLocation(nodeDescr));
+          nodeDescr.setDependentHostNames(getNetworkDependencies(nodeDescr));
         } else {
           nodeDescr.setNetworkLocation(
               resolveNetworkLocationWithFallBackToDefaultLocation(nodeDescr));
+          nodeDescr.setDependentHostNames(
+              getNetworkDependenciesWithDefault(nodeDescr));
         }
         networktopology.add(nodeDescr);
         nodeDescr.setSoftwareVersion(nodeReg.getSoftwareVersion());
@@ -1183,10 +1239,15 @@ public class DatanodeManager {
   /** For generating datanode reports */
   public List<DatanodeDescriptor> getDatanodeListForReport(
       final DatanodeReportType type) {
-    boolean listLiveNodes = type == DatanodeReportType.ALL ||
-                            type == DatanodeReportType.LIVE;
-    boolean listDeadNodes = type == DatanodeReportType.ALL ||
-                            type == DatanodeReportType.DEAD;
+    final boolean listLiveNodes =
+        type == DatanodeReportType.ALL ||
+        type == DatanodeReportType.LIVE;
+    final boolean listDeadNodes =
+        type == DatanodeReportType.ALL ||
+        type == DatanodeReportType.DEAD;
+    final boolean listDecommissioningNodes =
+        type == DatanodeReportType.ALL ||
+        type == DatanodeReportType.DECOMMISSIONING;
 
     ArrayList<DatanodeDescriptor> nodes;
     final HostFileManager.HostSet foundNodes = new HostFileManager.HostSet();
@@ -1197,7 +1258,10 @@ public class DatanodeManager {
       nodes = new ArrayList<DatanodeDescriptor>(datanodeMap.size());
       for (DatanodeDescriptor dn : datanodeMap.values()) {
         final boolean isDead = isDatanodeDead(dn);
-        if ((listLiveNodes && !isDead) || (listDeadNodes && isDead)) {
+        final boolean isDecommissioning = dn.isDecommissionInProgress();
+        if ((listLiveNodes && !isDead) ||
+            (listDeadNodes && isDead) ||
+            (listDecommissioningNodes && isDecommissioning)) {
             nodes.add(dn);
         }
         foundNodes.add(HostFileManager.resolvedAddressFromDatanodeID(dn));

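getNetworkDependencies() only consults plugins that implement DNSToSwitchMappingWithDependency, and a null result is treated as an unresolved topology. A table-driven stand-in for the getDependency(hostname) contract, kept independent of the Hadoop interfaces (a real plugin must also provide the resolve/reload methods of DNSToSwitchMapping; ScriptBasedMappingWithDependency is the implementation shipped with this change):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Return the co-located hosts for a datanode, an empty list when it has
    // none, and never null (null is treated as an unresolved topology).
    class StaticDependencyTable {
      private final Map<String, List<String>> table =
          new HashMap<String, List<String>>();

      void put(String host, List<String> dependents) {
        table.put(host, dependents);
      }

      List<String> getDependency(String hostname) {
        List<String> deps = table.get(hostname);
        return deps != null ? deps : Collections.<String>emptyList();
      }

      public static void main(String[] args) {
        StaticDependencyTable t = new StaticDependencyTable();
        t.put("dn1.example.com", Arrays.asList("dn1-peer.example.com"));
        System.out.println(t.getDependency("dn1.example.com"));
        System.out.println(t.getDependency("unknown-host"));  // [] rather than null
      }
    }
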
+ 38 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 class Host2NodesMap {
+  private HashMap<String, String> mapHost = new HashMap<String, String>();
   private final HashMap<String, DatanodeDescriptor[]> map
     = new HashMap<String, DatanodeDescriptor[]>();
   private final ReadWriteLock hostmapLock = new ReentrantReadWriteLock();
@@ -69,6 +70,10 @@ class Host2NodesMap {
       }
       
       String ipAddr = node.getIpAddr();
+      String hostname = node.getHostName();
+      
+      mapHost.put(hostname, ipAddr);
+      
       DatanodeDescriptor[] nodes = map.get(ipAddr);
       DatanodeDescriptor[] newNodes;
       if (nodes==null) {
@@ -95,6 +100,7 @@ class Host2NodesMap {
     }
       
     String ipAddr = node.getIpAddr();
+    String hostname = node.getHostName();
     hostmapLock.writeLock().lock();
     try {
 
@@ -105,6 +111,8 @@ class Host2NodesMap {
       if (nodes.length==1) {
         if (nodes[0]==node) {
           map.remove(ipAddr);
+          //remove hostname key since last datanode is removed
+          mapHost.remove(hostname);
           return true;
         } else {
           return false;
@@ -188,12 +196,40 @@ class Host2NodesMap {
     }
   }
 
+  
+
+  /** get a data node by its hostname. This should be used if only one 
+   * datanode service is running on a hostname. If multiple datanodes
+   * are running on a hostname then use methods getDataNodeByXferAddr and
+   * getDataNodeByHostNameAndPort.
+   * @return DatanodeDescriptor if found; otherwise null.
+   */
+  DatanodeDescriptor getDataNodeByHostName(String hostname) {
+    if(hostname == null) {
+      return null;
+    }
+    
+    hostmapLock.readLock().lock();
+    try {
+      String ipAddr = mapHost.get(hostname);
+      if(ipAddr == null) {
+        return null;
+      } else {  
+        return getDatanodeByHost(ipAddr);
+      }
+    } finally {
+      hostmapLock.readLock().unlock();
+    }
+  }
+
   @Override
   public String toString() {
     final StringBuilder b = new StringBuilder(getClass().getSimpleName())
         .append("[");
-    for(Map.Entry<String, DatanodeDescriptor[]> e : map.entrySet()) {
-      b.append("\n  " + e.getKey() + " => " + Arrays.asList(e.getValue()));
+    for(Map.Entry<String, String> host: mapHost.entrySet()) {
+      DatanodeDescriptor[] e = map.get(host.getValue());
+      b.append("\n  " + host.getKey() + " => "+host.getValue() + " => " 
+          + Arrays.asList(e));
     }
     return b.append("\n]").toString();
   }

+ 21 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java

@@ -76,6 +76,27 @@ class PendingDataNodeMessages {
     }
   }
   
+  /**
+   * Remove all pending DN messages which reference the given DN.
+   * @param dn the datanode whose messages we should remove.
+   */
+  void removeAllMessagesForDatanode(DatanodeDescriptor dn) {
+    for (Map.Entry<Block, Queue<ReportedBlockInfo>> entry :
+        queueByBlockId.entrySet()) {
+      Queue<ReportedBlockInfo> newQueue = Lists.newLinkedList();
+      Queue<ReportedBlockInfo> oldQueue = entry.getValue();
+      while (!oldQueue.isEmpty()) {
+        ReportedBlockInfo rbi = oldQueue.remove();
+        if (!rbi.getNode().equals(dn)) {
+          newQueue.add(rbi);
+        } else {
+          count--;
+        }
+      }
+      queueByBlockId.put(entry.getKey(), newQueue);
+    }
+  }
+  
   void enqueueReportedBlock(DatanodeDescriptor dn, String storageID, Block block,
       ReplicaState reportedState) {
     block = new Block(block);

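removeAllMessagesForDatanode() rebuilds each per-block queue without the departing node's entries and keeps the running count consistent. The same filter-and-rebuild pattern in isolation, with generic types standing in for Block and ReportedBlockInfo:

    import java.util.HashMap;
    import java.util.LinkedList;
    import java.util.Map;
    import java.util.Queue;

    // Drop every queued message that references a given node, rebuilding each
    // queue and adjusting the total count as entries are discarded.
    class PendingMessages<K, N> {
      private final Map<K, Queue<N>> queues = new HashMap<K, Queue<N>>();
      private int count = 0;

      void enqueue(K key, N node) {
        Queue<N> q = queues.get(key);
        if (q == null) {
          q = new LinkedList<N>();
          queues.put(key, q);
        }
        q.add(node);
        count++;
      }

      void removeAllMessagesForNode(N node) {
        for (Map.Entry<K, Queue<N>> entry : queues.entrySet()) {
          Queue<N> kept = new LinkedList<N>();
          Queue<N> old = entry.getValue();
          while (!old.isEmpty()) {
            N n = old.remove();
            if (!n.equals(node)) {
              kept.add(n);
            } else {
              count--;
            }
          }
          entry.setValue(kept);
        }
      }

      int count() {
        return count;
      }
    }
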
+ 10 - 562
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java

@@ -18,58 +18,16 @@
 
 package org.apache.hadoop.hdfs.server.common;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.URL;
-import java.net.URLEncoder;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-
-import javax.servlet.ServletContext;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.jsp.JspWriter;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.BlockReader;
-import org.apache.hadoop.hdfs.BlockReaderFactory;
-import org.apache.hadoop.hdfs.ClientContext;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.RemotePeerFactory;
-import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.net.TcpPeerServer;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
 import org.apache.hadoop.hdfs.web.resources.DelegationParam;
 import org.apache.hadoop.hdfs.web.resources.DoAsParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
-import org.apache.hadoop.http.HtmlQuoting;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
@@ -78,484 +36,26 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.VersionInfo;
 
-import com.google.common.base.Charsets;
+import javax.servlet.ServletContext;
+import javax.servlet.http.HttpServletRequest;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
 
 @InterfaceAudience.Private
 public class JspHelper {
   public static final String CURRENT_CONF = "current.conf";
   public static final String DELEGATION_PARAMETER_NAME = DelegationParam.NAME;
   public static final String NAMENODE_ADDRESS = "nnaddr";
-  static final String SET_DELEGATION = "&" + DELEGATION_PARAMETER_NAME +
-                                              "=";
   private static final Log LOG = LogFactory.getLog(JspHelper.class);
 
   /** Private constructor for preventing creating JspHelper object. */
-  private JspHelper() {} 
-  
-  // data structure to count number of blocks on datanodes.
-  private static class NodeRecord extends DatanodeInfo {
-    int frequency;
-
-    public NodeRecord(DatanodeInfo info, int count) {
-      super(info);
-      this.frequency = count;
-    }
-    
-    @Override
-    public boolean equals(Object obj) {
-      // Sufficient to use super equality as datanodes are uniquely identified
-      // by DatanodeID
-      return (this == obj) || super.equals(obj);
-    }
-    @Override
-    public int hashCode() {
-      // Super implementation is sufficient
-      return super.hashCode();
-    }
-  }
-
-  // compare two records based on their frequency
-  private static class NodeRecordComparator implements Comparator<NodeRecord> {
-
-    @Override
-    public int compare(NodeRecord o1, NodeRecord o2) {
-      if (o1.frequency < o2.frequency) {
-        return -1;
-      } else if (o1.frequency > o2.frequency) {
-        return 1;
-      } 
-      return 0;
-    }
-  }
-  
-  /**
-   * convenience method for canonicalizing host name.
-   * @param addr name:port or name 
-   * @return canonicalized host name
-   */
-   public static String canonicalize(String addr) {
-    // default port 1 is supplied to allow addr without port.
-    // the port will be ignored.
-    return NetUtils.createSocketAddr(addr, 1).getAddress()
-           .getCanonicalHostName();
-  }
-
-  /**
-   * A helper class that generates the correct URL for different schema.
-   *
-   */
-  public static final class Url {
-    public static String authority(String scheme, DatanodeID d) {
-      String fqdn = (d.getIpAddr() != null && !d.getIpAddr().isEmpty())?
-          canonicalize(d.getIpAddr()): 
-          d.getHostName();
-      if (scheme.equals("http")) {
-        return fqdn + ":" + d.getInfoPort();
-      } else if (scheme.equals("https")) {
-        return fqdn + ":" + d.getInfoSecurePort();
-      } else {
-        throw new IllegalArgumentException("Unknown scheme:" + scheme);
-      }
-    }
-
-    public static String url(String scheme, DatanodeID d) {
-      return scheme + "://" + authority(scheme, d);
-    }
-  }
-
-  public static DatanodeInfo bestNode(LocatedBlocks blks, Configuration conf)
-      throws IOException {
-    HashMap<DatanodeInfo, NodeRecord> map =
-      new HashMap<DatanodeInfo, NodeRecord>();
-    for (LocatedBlock block : blks.getLocatedBlocks()) {
-      DatanodeInfo[] nodes = block.getLocations();
-      for (DatanodeInfo node : nodes) {
-        NodeRecord record = map.get(node);
-        if (record == null) {
-          map.put(node, new NodeRecord(node, 1));
-        } else {
-          record.frequency++;
-        }
-      }
-    }
-    NodeRecord[] nodes = map.values().toArray(new NodeRecord[map.size()]);
-    Arrays.sort(nodes, new NodeRecordComparator());
-    return bestNode(nodes, false);
-  }
-
-  public static DatanodeInfo bestNode(LocatedBlock blk, Configuration conf)
-      throws IOException {
-    DatanodeInfo[] nodes = blk.getLocations();
-    return bestNode(nodes, true);
-  }
-
-  private static DatanodeInfo bestNode(DatanodeInfo[] nodes, boolean doRandom)
-      throws IOException {
-    if (nodes == null || nodes.length == 0) {
-      throw new IOException("No nodes contain this block");
-    }
-    int l = 0;
-    while (l < nodes.length && !nodes[l].isDecommissioned()) {
-      ++l;
-    }
-
-    if (l == 0) {
-      throw new IOException("No active nodes contain this block");
-    }
-
-    int index = doRandom ? DFSUtil.getRandom().nextInt(l) : 0;
-    return nodes[index];
-  }
-
-  public static void streamBlockInAscii(InetSocketAddress addr, String poolId,
-      long blockId, Token<BlockTokenIdentifier> blockToken, long genStamp,
-      long blockSize, long offsetIntoBlock, long chunkSizeToView,
-      JspWriter out, final Configuration conf, DFSClient.Conf dfsConf,
-      final DataEncryptionKey encryptionKey)
-          throws IOException {
-    if (chunkSizeToView == 0) return;
-    int amtToRead = (int)Math.min(chunkSizeToView, blockSize - offsetIntoBlock);
-      
-    BlockReader blockReader = new BlockReaderFactory(dfsConf).
-      setInetSocketAddress(addr).
-      setBlock(new ExtendedBlock(poolId, blockId, 0, genStamp)).
-      setFileName(BlockReaderFactory.getFileName(addr, poolId, blockId)).
-      setBlockToken(blockToken).
-      setStartOffset(offsetIntoBlock).
-      setLength(amtToRead).
-      setVerifyChecksum(true).
-      setClientName("JspHelper").
-      setClientCacheContext(ClientContext.getFromConf(conf)).
-      setDatanodeInfo(new DatanodeInfo(
-          new DatanodeID(addr.getAddress().getHostAddress(),
-              addr.getHostName(), poolId, addr.getPort(), 0, 0, 0))).
-      setCachingStrategy(CachingStrategy.newDefaultStrategy()).
-      setConfiguration(conf).
-      setRemotePeerFactory(new RemotePeerFactory() {
-        @Override
-        public Peer newConnectedPeer(InetSocketAddress addr)
-            throws IOException {
-          Peer peer = null;
-          Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
-          try {
-            sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
-            sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
-            peer = TcpPeerServer.peerFromSocketAndKey(sock, encryptionKey);
-          } finally {
-            if (peer == null) {
-              IOUtils.closeSocket(sock);
-            }
-          }
-          return peer;
-        }
-      }).
-      build();
-
-    final byte[] buf = new byte[amtToRead];
-    try {
-      int readOffset = 0;
-      int retries = 2;
-      while (amtToRead > 0) {
-        int numRead = amtToRead;
-        try {
-          blockReader.readFully(buf, readOffset, amtToRead);
-        } catch (IOException e) {
-          retries--;
-          if (retries == 0)
-            throw new IOException("Could not read data from datanode");
-          continue;
-        }
-        amtToRead -= numRead;
-        readOffset += numRead;
-      }
-    } finally {
-      blockReader.close();
-    }
-    out.print(HtmlQuoting.quoteHtmlChars(new String(buf, Charsets.UTF_8)));
-  }
-
-  public static void addTableHeader(JspWriter out) throws IOException {
-    out.print("<table border=\"1\""+
-              " cellpadding=\"2\" cellspacing=\"2\">");
-    out.print("<tbody>");
-  }
-  public static void addTableRow(JspWriter out, String[] columns) throws IOException {
-    out.print("<tr>");
-    for (int i = 0; i < columns.length; i++) {
-      out.print("<td style=\"vertical-align: top;\"><B>"+columns[i]+"</B><br></td>");
-    }
-    out.print("</tr>");
-  }
-  public static void addTableRow(JspWriter out, String[] columns, int row) throws IOException {
-    out.print("<tr>");
-      
-    for (int i = 0; i < columns.length; i++) {
-      if (row/2*2 == row) {//even
-        out.print("<td style=\"vertical-align: top;background-color:LightGrey;\"><B>"+columns[i]+"</B><br></td>");
-      } else {
-        out.print("<td style=\"vertical-align: top;background-color:LightBlue;\"><B>"+columns[i]+"</B><br></td>");
-          
-      }
-    }
-    out.print("</tr>");
-  }
-  public static void addTableFooter(JspWriter out) throws IOException {
-    out.print("</tbody></table>");
-  }
-
-  public static void sortNodeList(final List<DatanodeDescriptor> nodes,
-                           String field, String order) {
-        
-    class NodeComapare implements Comparator<DatanodeDescriptor> {
-      static final int 
-        FIELD_NAME              = 1,
-        FIELD_LAST_CONTACT      = 2,
-        FIELD_BLOCKS            = 3,
-        FIELD_CAPACITY          = 4,
-        FIELD_USED              = 5,
-        FIELD_PERCENT_USED      = 6,
-        FIELD_NONDFS_USED       = 7,
-        FIELD_REMAINING         = 8,
-        FIELD_PERCENT_REMAINING = 9,
-        FIELD_ADMIN_STATE       = 10,
-        FIELD_DECOMMISSIONED    = 11,
-        FIELD_BLOCKPOOL_USED    = 12,
-        FIELD_PERBLOCKPOOL_USED = 13,
-        FIELD_FAILED_VOLUMES    = 14,
-        SORT_ORDER_ASC          = 1,
-        SORT_ORDER_DSC          = 2;
-
-      int sortField = FIELD_NAME;
-      int sortOrder = SORT_ORDER_ASC;
-            
-      public NodeComapare(String field, String order) {
-        if (field.equals("lastcontact")) {
-          sortField = FIELD_LAST_CONTACT;
-        } else if (field.equals("capacity")) {
-          sortField = FIELD_CAPACITY;
-        } else if (field.equals("used")) {
-          sortField = FIELD_USED;
-        } else if (field.equals("nondfsused")) {
-          sortField = FIELD_NONDFS_USED;
-        } else if (field.equals("remaining")) {
-          sortField = FIELD_REMAINING;
-        } else if (field.equals("pcused")) {
-          sortField = FIELD_PERCENT_USED;
-        } else if (field.equals("pcremaining")) {
-          sortField = FIELD_PERCENT_REMAINING;
-        } else if (field.equals("blocks")) {
-          sortField = FIELD_BLOCKS;
-        } else if (field.equals("adminstate")) {
-          sortField = FIELD_ADMIN_STATE;
-        } else if (field.equals("decommissioned")) {
-          sortField = FIELD_DECOMMISSIONED;
-        } else if (field.equals("bpused")) {
-          sortField = FIELD_BLOCKPOOL_USED;
-        } else if (field.equals("pcbpused")) {
-          sortField = FIELD_PERBLOCKPOOL_USED;
-        } else if (field.equals("volfails")) {
-          sortField = FIELD_FAILED_VOLUMES;
-        } else {
-          sortField = FIELD_NAME;
-        }
-                
-        if (order.equals("DSC")) {
-          sortOrder = SORT_ORDER_DSC;
-        } else {
-          sortOrder = SORT_ORDER_ASC;
-        }
-      }
-
-      @Override
-      public int compare(DatanodeDescriptor d1,
-                         DatanodeDescriptor d2) {
-        int ret = 0;
-        switch (sortField) {
-        case FIELD_LAST_CONTACT:
-          ret = (int) (d2.getLastUpdate() - d1.getLastUpdate());
-          break;
-        case FIELD_CAPACITY:
-          long  dlong = d1.getCapacity() - d2.getCapacity();
-          ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
-          break;
-        case FIELD_USED:
-          dlong = d1.getDfsUsed() - d2.getDfsUsed();
-          ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
-          break;
-        case FIELD_NONDFS_USED:
-          dlong = d1.getNonDfsUsed() - d2.getNonDfsUsed();
-          ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
-          break;
-        case FIELD_REMAINING:
-          dlong = d1.getRemaining() - d2.getRemaining();
-          ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
-          break;
-        case FIELD_PERCENT_USED:
-          double ddbl =((d1.getDfsUsedPercent())-
-                        (d2.getDfsUsedPercent()));
-          ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
-          break;
-        case FIELD_PERCENT_REMAINING:
-          ddbl =((d1.getRemainingPercent())-
-                 (d2.getRemainingPercent()));
-          ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
-          break;
-        case FIELD_BLOCKS:
-          ret = d1.numBlocks() - d2.numBlocks();
-          break;
-        case FIELD_ADMIN_STATE:
-          ret = d1.getAdminState().toString().compareTo(
-              d2.getAdminState().toString());
-          break;
-        case FIELD_DECOMMISSIONED:
-          ret = DFSUtil.DECOM_COMPARATOR.compare(d1, d2);
-          break;
-        case FIELD_NAME: 
-          ret = d1.getHostName().compareTo(d2.getHostName());
-          break;
-        case FIELD_BLOCKPOOL_USED:
-          dlong = d1.getBlockPoolUsed() - d2.getBlockPoolUsed();
-          ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
-          break;
-        case FIELD_PERBLOCKPOOL_USED:
-          ddbl = d1.getBlockPoolUsedPercent() - d2.getBlockPoolUsedPercent();
-          ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
-          break;
-        case FIELD_FAILED_VOLUMES:
-          int dint = d1.getVolumeFailures() - d2.getVolumeFailures();
-          ret = (dint < 0) ? -1 : ((dint > 0) ? 1 : 0);
-          break;
-        default:
-          throw new IllegalArgumentException("Invalid sortField");
-        }
-        return (sortOrder == SORT_ORDER_DSC) ? -ret : ret;
-      }
-    }
-        
-    Collections.sort(nodes, new NodeComapare(field, order));
-  }
-
-  public static void printPathWithLinks(String dir, JspWriter out, 
-                                        int namenodeInfoPort,
-                                        String tokenString,
-                                        String nnAddress
-                                        ) throws IOException {
-    try {
-      String[] parts = dir.split(Path.SEPARATOR);
-      StringBuilder tempPath = new StringBuilder(dir.length());
-      out.print("<a href=\"browseDirectory.jsp" + "?dir="+ Path.SEPARATOR
-          + "&namenodeInfoPort=" + namenodeInfoPort
-          + getDelegationTokenUrlParam(tokenString) 
-          + getUrlParam(NAMENODE_ADDRESS, nnAddress) + "\">" + Path.SEPARATOR
-          + "</a>");
-      tempPath.append(Path.SEPARATOR);
-      for (int i = 0; i < parts.length-1; i++) {
-        if (!parts[i].equals("")) {
-          tempPath.append(parts[i]);
-          out.print("<a href=\"browseDirectory.jsp" + "?dir="
-              + HtmlQuoting.quoteHtmlChars(tempPath.toString()) + "&namenodeInfoPort=" + namenodeInfoPort
-              + getDelegationTokenUrlParam(tokenString)
-              + getUrlParam(NAMENODE_ADDRESS, nnAddress));
-          out.print("\">" + HtmlQuoting.quoteHtmlChars(parts[i]) + "</a>" + Path.SEPARATOR);
-          tempPath.append(Path.SEPARATOR);
-        }
-      }
-      if(parts.length > 0) {
-        out.print(HtmlQuoting.quoteHtmlChars(parts[parts.length-1]));
-      }
-    }
-    catch (UnsupportedEncodingException ex) {
-      ex.printStackTrace();
-    }
-  }
-
-  public static void printGotoForm(JspWriter out,
-                                   int namenodeInfoPort,
-                                   String tokenString,
-                                   String file,
-                                   String nnAddress) throws IOException {
-    out.print("<form action=\"browseDirectory.jsp\" method=\"get\" name=\"goto\">");
-    out.print("Goto : ");
-    out.print("<input name=\"dir\" type=\"text\" width=\"50\" id=\"dir\" value=\""+ HtmlQuoting.quoteHtmlChars(file)+"\"/>");
-    out.print("<input name=\"go\" type=\"submit\" value=\"go\"/>");
-    out.print("<input name=\"namenodeInfoPort\" type=\"hidden\" "
-        + "value=\"" + namenodeInfoPort  + "\"/>");
-    if (UserGroupInformation.isSecurityEnabled()) {
-      out.print("<input name=\"" + DELEGATION_PARAMETER_NAME
-          + "\" type=\"hidden\" value=\"" + tokenString + "\"/>");
-    }
-    out.print("<input name=\""+ NAMENODE_ADDRESS +"\" type=\"hidden\" "
-        + "value=\"" + nnAddress  + "\"/>");
-    out.print("</form>");
-  }
-  
-  public static void createTitle(JspWriter out, 
-                                 HttpServletRequest req, 
-                                 String  file) throws IOException{
-    if(file == null) file = "";
-    int start = Math.max(0,file.length() - 100);
-    if(start != 0)
-      file = "..." + file.substring(start, file.length());
-    out.print("<title>HDFS:" + file + "</title>");
-  }
-
-  /** Convert a String to chunk-size-to-view. */
-  public static int string2ChunkSizeToView(String s, int defaultValue) {
-    int n = s == null? 0: Integer.parseInt(s);
-    return n > 0? n: defaultValue;
-  }
-
-  /** Return a table containing version information. */
-  public static String getVersionTable() {
-    return "<div class='dfstable'><table>"       
-        + "\n  <tr><td class='col1'>Version:</td><td>" + VersionInfo.getVersion() + ", " + VersionInfo.getRevision() + "</td></tr>"
-        + "\n  <tr><td class='col1'>Compiled:</td><td>" + VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch() + "</td></tr>"
-        + "\n</table></div>";
-  }
-
-  /**
-   * Validate filename. 
-   * @return null if the filename is invalid.
-   *         Otherwise, return the validated filename.
-   */
-  public static String validatePath(String p) {
-    return p == null || p.length() == 0?
-        null: new Path(p).toUri().getPath();
-  }
-
-  /**
-   * Validate a long value. 
-   * @return null if the value is invalid.
-   *         Otherwise, return the validated Long object.
-   */
-  public static Long validateLong(String value) {
-    return value == null? null: Long.parseLong(value);
-  }
-
-  /**
-   * Validate a URL.
-   * @return null if the value is invalid.
-   *         Otherwise, return the validated URL String.
-   */
-  public static String validateURL(String value) {
-    try {
-      return URLEncoder.encode(new URL(value).toString(), "UTF-8");
-    } catch (IOException e) {
-      return null;
-    }
-  }
-  
-  /**
-   * If security is turned off, what is the default web user?
-   * @param conf the configuration to look in
-   * @return the remote user that was configuration
-   */
-  public static UserGroupInformation getDefaultWebUser(Configuration conf
-                                                       ) throws IOException {
-    return UserGroupInformation.createRemoteUser(getDefaultWebUserName(conf));
-  }
+  private JspHelper() {}
 
   private static String getDefaultWebUserName(Configuration conf
       ) throws IOException {
@@ -736,56 +236,4 @@ public class JspHelper {
     return username;
   }
 
-  /**
-   * Returns the url parameter for the given token string.
-   * @param tokenString
-   * @return url parameter
-   */
-  public static String getDelegationTokenUrlParam(String tokenString) {
-    if (tokenString == null ) {
-      return "";
-    }
-    if (UserGroupInformation.isSecurityEnabled()) {
-      return SET_DELEGATION + tokenString;
-    } else {
-      return "";
-    }
-  }
-
-  /**
-   * Returns the url parameter for the given string, prefixed with
-   * paramSeparator.
-   * 
-   * @param name parameter name
-   * @param val parameter value
-   * @param paramSeparator URL parameter prefix, i.e. either '?' or '&'
-   * @return url parameter
-   */
-  public static String getUrlParam(String name, String val, String paramSeparator) {
-    return val == null ? "" : paramSeparator + name + "=" + val;
-  }
-  
-  /**
-   * Returns the url parameter for the given string, prefixed with '?' if
-   * firstParam is true, prefixed with '&' if firstParam is false.
-   * 
-   * @param name parameter name
-   * @param val parameter value
-   * @param firstParam true if this is the first parameter in the list, false otherwise
-   * @return url parameter
-   */
-  public static String getUrlParam(String name, String val, boolean firstParam) {
-    return getUrlParam(name, val, firstParam ? "?" : "&");
-  }
-  
-  /**
-   * Returns the url parameter for the given string, prefixed with '&'.
-   * 
-   * @param name parameter name
-   * @param val parameter value
-   * @return url parameter
-   */
-  public static String getUrlParam(String name, String val) {
-    return getUrlParam(name, val, false);
-  }
 }
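
A note on the deleted helpers: the getUrlParam overloads above were plain string concatenation used by the old JSP pages to build query strings. A minimal, standalone sketch of that behaviour (the UrlParamSketch class name and the main() driver are added here purely for illustration):

// UrlParamSketch.java -- illustrative only; mirrors the deleted JspHelper helpers.
public final class UrlParamSketch {

  // Returns "" when val is null, otherwise "<separator><name>=<val>".
  static String getUrlParam(String name, String val, String paramSeparator) {
    return val == null ? "" : paramSeparator + name + "=" + val;
  }

  // '?' introduces the first parameter of a URL, '&' every later one.
  static String getUrlParam(String name, String val, boolean firstParam) {
    return getUrlParam(name, val, firstParam ? "?" : "&");
  }

  public static void main(String[] args) {
    String url = "http://nn:50070/browseDirectory.jsp"
        + getUrlParam("dir", "/user/foo", true)            // "?dir=/user/foo"
        + getUrlParam("namenodeInfoPort", "50070", false); // "&namenodeInfoPort=50070"
    System.out.println(url);
  }
}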

+ 18 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java

@@ -145,7 +145,11 @@ class BPOfferService {
       return null;
     }
   }
-  
+
+  boolean hasBlockPoolId() {
+    return getNamespaceInfo() != null;
+  }
+
   synchronized NamespaceInfo getNamespaceInfo() {
     return bpNSInfo;
   }
@@ -679,4 +683,17 @@ class BPOfferService {
     return true;
   }
 
+  /*
+   * Let the actor retry initialization until all namenodes of the cluster
+   * have failed.
+   */
+  boolean shouldRetryInit() {
+    if (hasBlockPoolId()) {
+      // One of the namenodes registered successfully, so continue retrying
+      // for the others.
+      return true;
+    }
+    return isAlive();
+  }
+
 }
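
The intent of shouldRetryInit() above: keep retrying as long as some namenode of this nameservice has already completed the handshake (so a block pool ID is known); otherwise, retry only while the service is still considered alive. A hedged restatement of that decision as a standalone function (RetryGateSketch and its boolean parameters are hypothetical stand-ins for the real BPOfferService state):

// RetryGateSketch.java -- illustrative restatement of the retry decision, not the HDFS code.
public final class RetryGateSketch {

  // hasBlockPoolId: some namenode of this nameservice already handshook successfully.
  // anyActorAlive: at least one service actor of this block pool is still alive.
  static boolean shouldRetryInit(boolean hasBlockPoolId, boolean anyActorAlive) {
    if (hasBlockPoolId) {
      // Another namenode registered successfully, so keep retrying this one.
      return true;
    }
    // Otherwise retry only while the service has not been declared failed.
    return anyActorAlive;
  }

  public static void main(String[] args) {
    System.out.println(shouldRetryInit(true, false));  // true  -> keep retrying
    System.out.println(shouldRetryInit(false, true));  // true  -> keep retrying
    System.out.println(shouldRetryInit(false, false)); // false -> give up
  }
}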

+ 40 - 23
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java

@@ -90,8 +90,13 @@ class BPServiceActor implements Runnable {
   Thread bpThread;
   DatanodeProtocolClientSideTranslatorPB bpNamenode;
   private volatile long lastHeartbeat = 0;
-  private volatile boolean initialized = false;
-  
+
+  static enum RunningState {
+    CONNECTING, INIT_FAILED, RUNNING, EXITED, FAILED;
+  }
+
+  private volatile RunningState runningState = RunningState.CONNECTING;
+
   /**
    * Between block reports (which happen on the order of once an hour) the
    * DN reports smaller incremental changes to its block list. This map,
@@ -118,17 +123,12 @@ class BPServiceActor implements Runnable {
     this.dnConf = dn.getDnConf();
   }
 
-  /**
-   * returns true if BP thread has completed initialization of storage
-   * and has registered with the corresponding namenode
-   * @return true if initialized
-   */
-  boolean isInitialized() {
-    return initialized;
-  }
-  
   boolean isAlive() {
-    return shouldServiceRun && bpThread.isAlive();
+    if (!shouldServiceRun || !bpThread.isAlive()) {
+      return false;
+    }
+    return runningState == BPServiceActor.RunningState.RUNNING
+        || runningState == BPServiceActor.RunningState.CONNECTING;
   }
 
   @Override
@@ -805,19 +805,30 @@ class BPServiceActor implements Runnable {
     LOG.info(this + " starting to offer service");
 
     try {
-      // init stuff
-      try {
-        // setup storage
-        connectToNNAndHandshake();
-      } catch (IOException ioe) {
-        // Initial handshake, storage recovery or registration failed
-        // End BPOfferService thread
-        LOG.fatal("Initialization failed for block pool " + this, ioe);
-        return;
+      while (true) {
+        // init stuff
+        try {
+          // setup storage
+          connectToNNAndHandshake();
+          break;
+        } catch (IOException ioe) {
+          // Initial handshake, storage recovery or registration failed
+          runningState = RunningState.INIT_FAILED;
+          if (shouldRetryInit()) {
+            // Retry until all namenodes of this BPOS have failed initialization
+            LOG.error("Initialization failed for " + this + " "
+                + ioe.getLocalizedMessage());
+            sleepAndLogInterrupts(5000, "initializing");
+          } else {
+            runningState = RunningState.FAILED;
+            LOG.fatal("Initialization failed for " + this + ". Exiting. ", ioe);
+            return;
+          }
+        }
       }
 
-      initialized = true; // bp is initialized;
-      
+      runningState = RunningState.RUNNING;
+
       while (shouldRun()) {
         try {
           offerService();
@@ -826,14 +837,20 @@ class BPServiceActor implements Runnable {
           sleepAndLogInterrupts(5000, "offering service");
         }
       }
+      runningState = RunningState.EXITED;
     } catch (Throwable ex) {
       LOG.warn("Unexpected exception in block pool " + this, ex);
+      runningState = RunningState.FAILED;
     } finally {
       LOG.warn("Ending block pool service for: " + this);
       cleanUp();
     }
   }
 
+  private boolean shouldRetryInit() {
+    return shouldRun() && bpos.shouldRetryInit();
+  }
+
   private boolean shouldRun() {
     return shouldServiceRun && dn.shouldRun();
   }
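
The rewritten run() loop follows a connect-with-retry pattern: attempt the handshake, and on failure either sleep and retry (INIT_FAILED) or give up (FAILED), depending on shouldRetryInit(). A self-contained sketch of that pattern, assuming a hypothetical connect callable and shouldRetry supplier in place of connectToNNAndHandshake() and shouldRetryInit():

import java.util.concurrent.Callable;
import java.util.function.BooleanSupplier;

// ConnectRetrySketch.java -- generic retry-until-connected loop modelled on the
// BPServiceActor change above; not the HDFS code itself.
public final class ConnectRetrySketch {

  enum RunningState { CONNECTING, INIT_FAILED, RUNNING, FAILED }

  private volatile RunningState runningState = RunningState.CONNECTING;

  RunningState connectWithRetry(Callable<Void> connect,
                                BooleanSupplier shouldRetry,
                                long retrySleepMs) throws InterruptedException {
    while (true) {
      try {
        connect.call();                         // e.g. handshake with the namenode
        runningState = RunningState.RUNNING;    // initialization succeeded
        return runningState;
      } catch (Exception e) {
        runningState = RunningState.INIT_FAILED;
        if (!shouldRetry.getAsBoolean()) {
          runningState = RunningState.FAILED;   // give up; caller should exit
          return runningState;
        }
        Thread.sleep(retrySleepMs);             // back off before the next attempt
      }
    }
  }
}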

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java

@@ -88,7 +88,11 @@ class BlockPoolManager {
   
   synchronized void remove(BPOfferService t) {
     offerServices.remove(t);
-    bpByBlockPoolId.remove(t.getBlockPoolId());
+    if (t.hasBlockPoolId()) {
+      // It's possible that the block pool never successfully registered
+      // with any NN, so it was never added to this map
+      bpByBlockPoolId.remove(t.getBlockPoolId());
+    }
     
     boolean removed = false;
     for (Iterator<BPOfferService> it = bpByNameserviceId.values().iterator();

+ 19 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -847,19 +847,24 @@ public class DataNode extends Configured
    */
   void shutdownBlockPool(BPOfferService bpos) {
     blockPoolManager.remove(bpos);
+    if (bpos.hasBlockPoolId()) {
+      // It's possible that this block pool is shutting down before it ever
+      // registered successfully anywhere; in that case there is no block
+      // pool id to clean up.
+      String bpId = bpos.getBlockPoolId();
+      if (blockScanner != null) {
+        blockScanner.removeBlockPool(bpId);
+      }
 
-    String bpId = bpos.getBlockPoolId();
-    if (blockScanner != null) {
-      blockScanner.removeBlockPool(bpId);
-    }
-  
-    if (data != null) { 
-      data.shutdownBlockPool(bpId);
-    }
+      if (data != null) {
+        data.shutdownBlockPool(bpId);
+      }
 
-    if (storage != null) {
-      storage.removeBlockPoolStorage(bpId);
+      if (storage != null) {
+        storage.removeBlockPoolStorage(bpId);
+      }
     }
+
   }
 
   /**
@@ -880,10 +885,10 @@ public class DataNode extends Configured
           + " should have retrieved namespace info before initBlockPool.");
     }
     
+    setClusterId(nsInfo.clusterID, nsInfo.getBlockPoolID());
+
     // Register the new block pool with the BP manager.
     blockPoolManager.addBlockPool(bpos);
-
-    setClusterId(nsInfo.clusterID, nsInfo.getBlockPoolID());
     
     // In the case that this is the first block pool to connect, initialize
     // the dataset, block scanners, etc.
@@ -1067,6 +1072,7 @@ public class DataNode extends Configured
       Token<BlockTokenIdentifier> token) throws IOException {
     checkBlockLocalPathAccess();
     checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
+    Preconditions.checkNotNull(data, "Storage not yet initialized");
     BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
     if (LOG.isDebugEnabled()) {
       if (info != null) {
@@ -2427,6 +2433,7 @@ public class DataNode extends Configured
    */
   @Override // DataNodeMXBean
   public String getVolumeInfo() {
+    Preconditions.checkNotNull(data, "Storage not yet initialized");
     return JSON.toString(data.getVolumeInfoMap());
   }
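
The two Preconditions.checkNotNull(data, "Storage not yet initialized") guards above turn a later, harder-to-diagnose NullPointerException deep inside the dataset code into an immediate failure with a clear message when the storage is not initialized yet. A minimal illustration of that fail-fast idiom using Guava's Preconditions (FailFastSketch and requireStorage are hypothetical names, not part of DataNode):

import com.google.common.base.Preconditions;

// FailFastSketch.java -- illustrates the guard added in DataNode; types are hypothetical.
public final class FailFastSketch {

  static Object requireStorage(Object data) {
    // Throws NullPointerException("Storage not yet initialized") if data is null,
    // otherwise returns data unchanged.
    return Preconditions.checkNotNull(data, "Storage not yet initialized");
  }

  public static void main(String[] args) {
    System.out.println(requireStorage(new Object())); // passes through
    try {
      requireStorage(null);
    } catch (NullPointerException expected) {
      System.out.println(expected.getMessage());      // "Storage not yet initialized"
    }
  }
}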
   

+ 9 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -221,11 +221,16 @@ public class DataStorage extends Storage {
     // Each storage directory is treated individually.
     // During startup some of them can upgrade or rollback 
     // while others could be up to date for the regular startup.
-    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
-      doTransition(datanode, getStorageDir(idx), nsInfo, startOpt);
-      createStorageID(getStorageDir(idx));
+    try {
+      for (int idx = 0; idx < getNumStorageDirs(); idx++) {
+        doTransition(datanode, getStorageDir(idx), nsInfo, startOpt);
+        createStorageID(getStorageDir(idx));
+      }
+    } catch (IOException e) {
+      unlockAll();
+      throw e;
     }
-    
+
     // 3. Update all storages. Some of them might have just been formatted.
     this.writeAll();
     

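The DataStorage change above wraps the per-directory transition loop so that, if any directory fails, the locks already taken on the other storage directories are released before the exception propagates. A hedged sketch of that cleanup-on-partial-failure pattern (StorageDir, transition() and unlock() are hypothetical stand-ins for the real Storage API):

import java.io.IOException;
import java.util.List;

// UnlockOnFailureSketch.java -- models the DataStorage change: if transitioning any
// storage directory fails, unlock everything acquired so far and rethrow.
public final class UnlockOnFailureSketch {

  interface StorageDir {
    void transition() throws IOException; // e.g. upgrade/rollback/finalize
    void unlock();                        // release the directory lock
  }

  static void transitionAll(List<StorageDir> dirs) throws IOException {
    try {
      for (StorageDir dir : dirs) {
        dir.transition();
      }
    } catch (IOException e) {
      // Partial failure: do not leave the other directories locked while the
      // caller aborts this block pool's initialization.
      for (StorageDir dir : dirs) {
        dir.unlock();
      }
      throw e;
    }
  }
}
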
Some files were not shown because too many files changed in this diff