
HADOOP-13597. Switch KMS from Tomcat to Jetty. Contributed by John Zhuge.

Xiao Chen, 8 years ago
commit 5d182949ba
31 changed files with 1143 additions and 1152 deletions
  1. hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml (+24, -1)
  2. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java (+3, -3)
  3. hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh (+35, -16)
  4. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfigurationWithLogging.java (+113, -0)
  5. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java (+120, -43)
  6. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java (+50, -14)
  7. hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md (+6, -2)
  8. hadoop-common-project/hadoop-common/src/test/scripts/hadoop_mkdir.bats (+42, -0)
  9. hadoop-common-project/hadoop-common/src/test/scripts/hadoop_using_envvar.bats (+33, -0)
  10. hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml (+1, -1)
  11. hadoop-common-project/hadoop-kms/pom.xml (+11, -149)
  12. hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh (+15, -33)
  13. hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties (+1, -2)
  14. hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml (+2, -165)
  15. hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java (+16, -0)
  16. hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJMXServlet.java (+0, -36)
  17. hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java (+8, -15)
  18. hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java (+155, -0)
  19. hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh (+0, -72)
  20. hadoop-common-project/hadoop-kms/src/main/libexec/shellprofile.d/hadoop-kms.sh (+57, -0)
  21. hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml (+248, -0)
  22. hadoop-common-project/hadoop-kms/src/main/resources/webapps/kms/WEB-INF/web.xml (+1, -11)
  23. hadoop-common-project/hadoop-kms/src/main/resources/webapps/static/index.html (+10, -2)
  24. hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh (+38, -78)
  25. hadoop-common-project/hadoop-kms/src/main/tomcat/ROOT/WEB-INF/web.xml (+0, -16)
  26. hadoop-common-project/hadoop-kms/src/main/tomcat/logging.properties (+0, -67)
  27. hadoop-common-project/hadoop-kms/src/main/tomcat/server.xml (+0, -155)
  28. hadoop-common-project/hadoop-kms/src/main/tomcat/ssl-server.xml.conf (+0, -136)
  29. hadoop-common-project/hadoop-kms/src/site/configuration.xsl (+49, -0)
  30. hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm (+84, -38)
  31. hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java (+21, -97)

+ 24 - 1
hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml

@@ -21,6 +21,14 @@
   </formats>
   <includeBaseDirectory>false</includeBaseDirectory>
   <fileSets>
+    <!-- Jar file -->
+    <fileSet>
+      <directory>target</directory>
+      <outputDirectory>/share/hadoop/common</outputDirectory>
+      <includes>
+        <include>${project.artifactId}-${project.version}.jar</include>
+      </includes>
+    </fileSet>
     <!-- Configuration files -->
     <fileSet>
       <directory>${basedir}/src/main/conf</directory>
@@ -41,7 +49,7 @@
       <directory>${basedir}/src/main/libexec</directory>
       <outputDirectory>/libexec</outputDirectory>
       <includes>
-        <include>*</include>
+        <include>**/*</include>
       </includes>
       <fileMode>0755</fileMode>
     </fileSet>
@@ -51,4 +59,19 @@
       <outputDirectory>/share/doc/hadoop/kms</outputDirectory>
     </fileSet>
   </fileSets>
+  <dependencySets>
+    <dependencySet>
+      <useProjectArtifact>false</useProjectArtifact>
+      <outputDirectory>/share/hadoop/common/lib</outputDirectory>
+      <!-- Exclude hadoop artifacts. They will be found via HADOOP* env -->
+      <excludes>
+        <exclude>org.apache.hadoop:hadoop-common</exclude>
+        <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
+        <!-- use slf4j from common to avoid multiple binding warnings -->
+        <exclude>org.slf4j:slf4j-api</exclude>
+        <exclude>org.slf4j:slf4j-log4j12</exclude>
+        <exclude>org.hsqldb:hsqldb</exclude>
+      </excludes>
+    </dependencySet>
+  </dependencySets>
 </assembly>

+ 3 - 3
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java

@@ -142,7 +142,7 @@ public class AuthenticationFilter implements Filter {
   private String cookieDomain;
   private String cookiePath;
   private boolean isCookiePersistent;
-  private boolean isInitializedByTomcat;
+  private boolean destroySecretProvider;
 
   /**
    * <p>Initializes the authentication filter and signer secret provider.</p>
@@ -209,7 +209,7 @@ public class AuthenticationFilter implements Filter {
         secretProvider = constructSecretProvider(
             filterConfig.getServletContext(),
             config, false);
-        isInitializedByTomcat = true;
+        destroySecretProvider = true;
       } catch (Exception ex) {
         throw new ServletException(ex);
       }
@@ -356,7 +356,7 @@ public class AuthenticationFilter implements Filter {
       authHandler.destroy();
       authHandler = null;
     }
-    if (secretProvider != null && isInitializedByTomcat) {
+    if (secretProvider != null && destroySecretProvider) {
       secretProvider.destroy();
       secretProvider = null;
     }

+ 35 - 16
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

@@ -262,6 +262,39 @@ function hadoop_deprecate_envvar
   fi
 }
 
+## @description  Declare `var` being used and print its value.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        var
+function hadoop_using_envvar
+{
+  local var=$1
+  local val=${!var}
+
+  if [[ -n "${val}" ]]; then
+    hadoop_debug "${var} = ${val}"
+  fi
+}
+
+## @description  Create the directory 'dir'.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        dir
+function hadoop_mkdir
+{
+  local dir=$1
+
+  if [[ ! -w "${dir}" ]] && [[ ! -d "${dir}" ]]; then
+    hadoop_error "WARNING: ${dir} does not exist. Creating."
+    if ! mkdir -p "${dir}"; then
+      hadoop_error "ERROR: Unable to create ${dir}. Aborting."
+      exit 1
+    fi
+  fi
+}
+
 ## @description  Bootstraps the Hadoop shell environment
 ## @audience     private
 ## @stability    evolving
@@ -1396,14 +1429,7 @@ function hadoop_verify_piddir
     hadoop_error "No pid directory defined."
     exit 1
   fi
-  if [[ ! -w "${HADOOP_PID_DIR}" ]] && [[ ! -d "${HADOOP_PID_DIR}" ]]; then
-    hadoop_error "WARNING: ${HADOOP_PID_DIR} does not exist. Creating."
-    mkdir -p "${HADOOP_PID_DIR}" > /dev/null 2>&1
-    if [[ $? -gt 0 ]]; then
-      hadoop_error "ERROR: Unable to create ${HADOOP_PID_DIR}. Aborting."
-      exit 1
-    fi
-  fi
+  hadoop_mkdir "${HADOOP_PID_DIR}"
   touch "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
   if [[ $? -gt 0 ]]; then
     hadoop_error "ERROR: Unable to write in ${HADOOP_PID_DIR}. Aborting."
@@ -1421,14 +1447,7 @@ function hadoop_verify_logdir
     hadoop_error "No log directory defined."
     exit 1
   fi
-  if [[ ! -w "${HADOOP_LOG_DIR}" ]] && [[ ! -d "${HADOOP_LOG_DIR}" ]]; then
-    hadoop_error "WARNING: ${HADOOP_LOG_DIR} does not exist. Creating."
-    mkdir -p "${HADOOP_LOG_DIR}" > /dev/null 2>&1
-    if [[ $? -gt 0 ]]; then
-      hadoop_error "ERROR: Unable to create ${HADOOP_LOG_DIR}. Aborting."
-      exit 1
-    fi
-  fi
+  hadoop_mkdir "${HADOOP_LOG_DIR}"
   touch "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
   if [[ $? -gt 0 ]]; then
     hadoop_error "ERROR: Unable to write in ${HADOOP_LOG_DIR}. Aborting."

+ 113 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfigurationWithLogging.java

@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.conf;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Logs access to {@link Configuration}.
+ * Sensitive data will be redacted.
+ */
+@InterfaceAudience.Private
+public class ConfigurationWithLogging extends Configuration {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ConfigurationWithLogging.class);
+
+  private final Logger log;
+  private final ConfigRedactor redactor;
+
+  public ConfigurationWithLogging(Configuration conf) {
+    super(conf);
+    log = LOG;
+    redactor = new ConfigRedactor(conf);
+  }
+
+  /**
+   * @see Configuration#get(String).
+   */
+  @Override
+  public String get(String name) {
+    String value = super.get(name);
+    log.info("Got {} = '{}'", name, redactor.redact(name, value));
+    return value;
+  }
+
+  /**
+   * @see Configuration#get(String, String).
+   */
+  @Override
+  public String get(String name, String defaultValue) {
+    String value = super.get(name, defaultValue);
+    log.info("Got {} = '{}' (default '{}')", name,
+        redactor.redact(name, value), redactor.redact(name, defaultValue));
+    return value;
+  }
+
+  /**
+   * @see Configuration#getBoolean(String, boolean).
+   */
+  @Override
+  public boolean getBoolean(String name, boolean defaultValue) {
+    boolean value = super.getBoolean(name, defaultValue);
+    log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
+    return value;
+  }
+
+  /**
+   * @see Configuration#getFloat(String, float).
+   */
+  @Override
+  public float getFloat(String name, float defaultValue) {
+    float value = super.getFloat(name, defaultValue);
+    log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
+    return value;
+  }
+
+  /**
+   * @see Configuration#getInt(String, int).
+   */
+  @Override
+  public int getInt(String name, int defaultValue) {
+    int value = super.getInt(name, defaultValue);
+    log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
+    return value;
+  }
+
+  /**
+   * @see Configuration#getLong(String, long).
+   */
+  @Override
+  public long getLong(String name, long defaultValue) {
+    long value = super.getLong(name, defaultValue);
+    log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
+    return value;
+  }
+
+  /**
+   * @see Configuration#set(String, String, String).
+   */
+  @Override
+  public void set(String name, String value, String source) {
+    log.info("Set {} to '{}'{}", name, redactor.redact(name, value),
+        source == null ? "" : " from " + source);
+    super.set(name, value, source);
+  }
+}
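The new wrapper makes every Configuration read visible in the logs, with sensitive values masked by ConfigRedactor. A minimal usage sketch (the demo class and the property name below are illustrative, not part of this change):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ConfigurationWithLogging;

public class ConfLoggingDemo {
  public static void main(String[] args) {
    // Wrap an ordinary Configuration; every get() is logged at INFO,
    // with sensitive values redacted before logging.
    Configuration conf = new ConfigurationWithLogging(new Configuration());
    // Logs: Got hadoop.kms.http.port = '9600' (default '9600')
    int port = conf.getInt("hadoop.kms.http.port", 9600);
    System.out.println("port = " + port);
  }
}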

+ 120 - 43
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.http;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
+
+import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -45,7 +49,10 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequestWrapper;
 import javax.servlet.http.HttpServletResponse;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.sun.jersey.spi.container.servlet.ServletContainer;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -54,14 +61,15 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.ConfServlet;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.security.AuthenticationFilterInitializer;
-import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Shell;
 import org.eclipse.jetty.http.HttpVersion;
@@ -90,16 +98,9 @@ import org.eclipse.jetty.servlet.ServletHolder;
 import org.eclipse.jetty.servlet.ServletMapping;
 import org.eclipse.jetty.util.ArrayUtil;
 import org.eclipse.jetty.util.MultiException;
-import org.eclipse.jetty.webapp.WebAppContext;
-import org.eclipse.jetty.util.thread.QueuedThreadPool;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.sun.jersey.spi.container.servlet.ServletContainer;
 import org.eclipse.jetty.util.ssl.SslContextFactory;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
+import org.eclipse.jetty.util.thread.QueuedThreadPool;
+import org.eclipse.jetty.webapp.WebAppContext;
 
 /**
  * Create a Jetty embedded server to answer http requests. The primary goal is
@@ -116,9 +117,22 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_US
 public final class HttpServer2 implements FilterContainer {
   public static final Log LOG = LogFactory.getLog(HttpServer2.class);
 
+  public static final String HTTP_SCHEME = "http";
+  public static final String HTTPS_SCHEME = "https";
+
+  public static final String HTTP_MAX_REQUEST_HEADER_SIZE_KEY =
+      "hadoop.http.max.request.header.size";
+  public static final int HTTP_MAX_REQUEST_HEADER_SIZE_DEFAULT = 65536;
+  public static final String HTTP_MAX_RESPONSE_HEADER_SIZE_KEY =
+      "hadoop.http.max.response.header.size";
+  public static final int HTTP_MAX_RESPONSE_HEADER_SIZE_DEFAULT = 65536;
+  public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
+  public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
+
   static final String FILTER_INITIALIZER_PROPERTY
       = "hadoop.http.filter.initializers";
-  public static final String HTTP_MAX_THREADS = "hadoop.http.max.threads";
+  @Deprecated
+  public static final String HTTP_MAX_THREADS = HTTP_MAX_THREADS_KEY;
 
   // The ServletContext attribute where the daemon Configuration
   // gets stored.
@@ -158,6 +172,7 @@ public final class HttpServer2 implements FilterContainer {
     private ArrayList<URI> endpoints = Lists.newArrayList();
     private String name;
     private Configuration conf;
+    private Configuration sslConf;
     private String[] pathSpecs;
     private AccessControlList adminsAcl;
     private boolean securityEnabled = false;
@@ -253,6 +268,15 @@ public final class HttpServer2 implements FilterContainer {
       return this;
     }
 
+    /**
+     * Specify the SSL configuration to load. This API provides an alternative
+     * to keyStore/keyPassword/trustStore.
+     */
+    public Builder setSSLConf(Configuration sslCnf) {
+      this.sslConf = sslCnf;
+      return this;
+    }
+
     public Builder setPathSpec(String[] pathSpec) {
       this.pathSpecs = pathSpec;
       return this;
@@ -315,7 +339,45 @@ public final class HttpServer2 implements FilterContainer {
       return this;
     }
 
+    /**
+     * A wrapper of {@link Configuration#getPassword(String)}. It returns
+     * <code>String</code> instead of <code>char[]</code> and throws
+     * {@link IOException} when the password not found.
+     *
+     * @param conf the configuration
+     * @param name the property name
+     * @return the password string
+     */
+    private static String getPassword(Configuration conf, String name)
+        throws IOException {
+      char[] passchars = conf.getPassword(name);
+      if (passchars == null) {
+        throw new IOException("Password " + name + " not found");
+      }
+      return new String(passchars);
+    }
 
+    /**
+     * Load SSL properties from the SSL configuration.
+     */
+    private void loadSSLConfiguration() throws IOException {
+      if (sslConf == null) {
+        return;
+      }
+      needsClientAuth(sslConf.getBoolean(
+          SSLFactory.SSL_SERVER_NEED_CLIENT_AUTH,
+          SSLFactory.SSL_SERVER_NEED_CLIENT_AUTH_DEFAULT));
+      keyStore(sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_LOCATION),
+          getPassword(sslConf, SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD),
+          sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_TYPE,
+              SSLFactory.SSL_SERVER_KEYSTORE_TYPE_DEFAULT));
+      keyPassword(getPassword(sslConf,
+          SSLFactory.SSL_SERVER_KEYSTORE_KEYPASSWORD));
+      trustStore(sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_LOCATION),
+          getPassword(sslConf, SSLFactory.SSL_SERVER_TRUSTSTORE_PASSWORD),
+          sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE,
+              SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE_DEFAULT));
+    }
 
     public HttpServer2 build() throws IOException {
       Preconditions.checkNotNull(name, "name is not set");
@@ -335,15 +397,33 @@ public final class HttpServer2 implements FilterContainer {
         server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
       }
 
+      for (URI ep : endpoints) {
+        if (HTTPS_SCHEME.equals(ep.getScheme())) {
+          loadSSLConfiguration();
+          break;
+        }
+      }
+
+      int requestHeaderSize = conf.getInt(
+          HTTP_MAX_REQUEST_HEADER_SIZE_KEY,
+          HTTP_MAX_REQUEST_HEADER_SIZE_DEFAULT);
+      int responseHeaderSize = conf.getInt(
+          HTTP_MAX_RESPONSE_HEADER_SIZE_KEY,
+          HTTP_MAX_RESPONSE_HEADER_SIZE_DEFAULT);
+
+      HttpConfiguration httpConfig = new HttpConfiguration();
+      httpConfig.setRequestHeaderSize(requestHeaderSize);
+      httpConfig.setResponseHeaderSize(responseHeaderSize);
+
       for (URI ep : endpoints) {
         final ServerConnector connector;
         String scheme = ep.getScheme();
-        if ("http".equals(scheme)) {
-          connector =
-              HttpServer2.createDefaultChannelConnector(server.webServer);
-        } else if ("https".equals(scheme)) {
-          connector = createHttpsChannelConnector(server.webServer);
-
+        if (HTTP_SCHEME.equals(scheme)) {
+          connector = createHttpChannelConnector(server.webServer,
+              httpConfig);
+        } else if (HTTPS_SCHEME.equals(scheme)) {
+          connector = createHttpsChannelConnector(server.webServer,
+              httpConfig);
         } else {
           throw new HadoopIllegalArgumentException(
               "unknown scheme for endpoint:" + ep);
@@ -356,16 +436,20 @@ public final class HttpServer2 implements FilterContainer {
       return server;
     }
 
-    private ServerConnector createHttpsChannelConnector(Server server) {
+    private ServerConnector createHttpChannelConnector(
+        Server server, HttpConfiguration httpConfig) {
       ServerConnector conn = new ServerConnector(server);
-      HttpConfiguration httpConfig = new HttpConfiguration();
-      httpConfig.setRequestHeaderSize(JettyUtils.HEADER_SIZE);
-      httpConfig.setResponseHeaderSize(JettyUtils.HEADER_SIZE);
-      httpConfig.setSecureScheme("https");
-      httpConfig.addCustomizer(new SecureRequestCustomizer());
       ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
       conn.addConnectionFactory(connFactory);
       configureChannelConnector(conn);
+      return conn;
+    }
+
+    private ServerConnector createHttpsChannelConnector(
+        Server server, HttpConfiguration httpConfig) {
+      httpConfig.setSecureScheme(HTTPS_SCHEME);
+      httpConfig.addCustomizer(new SecureRequestCustomizer());
+      ServerConnector conn = createHttpChannelConnector(server, httpConfig);
 
       SslContextFactory sslContextFactory = new SslContextFactory();
       sslContextFactory.setNeedClientAuth(needsClientAuth);
@@ -397,7 +481,7 @@ public final class HttpServer2 implements FilterContainer {
     this.webServer = new Server();
     this.adminsAcl = b.adminsAcl;
     this.handlers = new HandlerCollection();
-    this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir);
+    this.webAppContext = createWebAppContext(b, adminsAcl, appDir);
     this.xFrameOptionIsEnabled = b.xFrameEnabled;
     this.xFrameOption = b.xFrameOption;
 
@@ -482,8 +566,8 @@ public final class HttpServer2 implements FilterContainer {
     listeners.add(connector);
   }
 
-  private static WebAppContext createWebAppContext(String name,
-      Configuration conf, AccessControlList adminsAcl, final String appDir) {
+  private static WebAppContext createWebAppContext(Builder b,
+      AccessControlList adminsAcl, final String appDir) {
     WebAppContext ctx = new WebAppContext();
     ctx.setDefaultsDescriptor(null);
     ServletHolder holder = new ServletHolder(new DefaultServlet());
@@ -496,10 +580,15 @@ public final class HttpServer2 implements FilterContainer {
     holder.setInitParameters(params);
     ctx.setWelcomeFiles(new String[] {"index.html"});
     ctx.addServlet(holder, "/");
-    ctx.setDisplayName(name);
+    ctx.setDisplayName(b.name);
     ctx.setContextPath("/");
-    ctx.setWar(appDir + "/" + name);
-    ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
+    ctx.setWar(appDir + "/" + b.name);
+    String tempDirectory = b.conf.get(HTTP_TEMP_DIR_KEY);
+    if (tempDirectory != null && !tempDirectory.isEmpty()) {
+      ctx.setTempDirectory(new File(tempDirectory));
+      ctx.setAttribute("javax.servlet.context.tempdir", tempDirectory);
+    }
+    ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, b.conf);
     ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
     addNoCacheFilter(ctx);
     return ctx;
@@ -541,18 +630,6 @@ public final class HttpServer2 implements FilterContainer {
     }
   }
 
-  @InterfaceAudience.Private
-  public static ServerConnector createDefaultChannelConnector(Server server) {
-    ServerConnector conn = new ServerConnector(server);
-    HttpConfiguration httpConfig = new HttpConfiguration();
-    httpConfig.setRequestHeaderSize(JettyUtils.HEADER_SIZE);
-    httpConfig.setResponseHeaderSize(JettyUtils.HEADER_SIZE);
-    ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
-    conn.addConnectionFactory(connFactory);
-    configureChannelConnector(conn);
-    return conn;
-  }
-
   /** Get an array of FilterConfiguration specified in the conf */
   private static FilterInitializer[] getFilterInitializers(Configuration conf) {
     if (conf == null) {
@@ -1056,7 +1133,7 @@ public final class HttpServer2 implements FilterContainer {
     }
 
     try {
-      // explicitly destroy the secrete provider
+      // explicitly destroy the secret provider
       secretProvider.destroy();
       // clear & stop webAppContext attributes to avoid memory leaks.
       webAppContext.clearAttributes();
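Taken together, the Builder changes let a daemon bring up Jetty over HTTPS purely from ssl-server.xml. A rough sketch of how a caller such as the new KMSWebServer can use this (the endpoint, name, and port are illustrative; setName/setConf/addEndpoint are pre-existing Builder methods, setSSLConf is new in this change):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;

public class JettyHttpsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // ssl-server.xml supplies keystore/truststore locations and passwords;
    // loadSSLConfiguration() reads them via the SSLFactory.SSL_SERVER_* keys.
    Configuration sslConf = new Configuration(false);
    sslConf.addResource("ssl-server.xml");

    HttpServer2 server = new HttpServer2.Builder()
        .setName("kms")
        .setConf(conf)
        .setSSLConf(sslConf)  // new in this change
        .addEndpoint(new URI("https://0.0.0.0:9600"))
        .build();
    server.start();
  }
}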

+ 50 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java

@@ -60,27 +60,61 @@ public class SSLFactory implements ConnectionConfigurator {
   @InterfaceAudience.Private
   public static enum Mode { CLIENT, SERVER }
 
+  public static final String SSL_CLIENT_CONF_KEY = "hadoop.ssl.client.conf";
+  public static final String SSL_CLIENT_CONF_DEFAULT = "ssl-client.xml";
+  public static final String SSL_SERVER_CONF_KEY = "hadoop.ssl.server.conf";
+  public static final String SSL_SERVER_CONF_DEFAULT = "ssl-server.xml";
+
   public static final String SSL_REQUIRE_CLIENT_CERT_KEY =
-    "hadoop.ssl.require.client.cert";
+      "hadoop.ssl.require.client.cert";
+  public static final boolean SSL_REQUIRE_CLIENT_CERT_DEFAULT = false;
   public static final String SSL_HOSTNAME_VERIFIER_KEY =
-    "hadoop.ssl.hostname.verifier";
-  public static final String SSL_CLIENT_CONF_KEY =
-    "hadoop.ssl.client.conf";
-  public static final String SSL_SERVER_CONF_KEY =
-    "hadoop.ssl.server.conf";
-  public static final String SSLCERTIFICATE = IBM_JAVA?"ibmX509":"SunX509";
+      "hadoop.ssl.hostname.verifier";
+  public static final String SSL_ENABLED_PROTOCOLS_KEY =
+      "hadoop.ssl.enabled.protocols";
+  public static final String SSL_ENABLED_PROTOCOLS_DEFAULT =
+      "TLSv1,SSLv2Hello,TLSv1.1,TLSv1.2";
+
+  public static final String SSL_SERVER_NEED_CLIENT_AUTH =
+      "ssl.server.need.client.auth";
+  public static final boolean SSL_SERVER_NEED_CLIENT_AUTH_DEFAULT = false;
+
+  public static final String SSL_SERVER_KEYSTORE_LOCATION =
+      "ssl.server.keystore.location";
+  public static final String SSL_SERVER_KEYSTORE_PASSWORD =
+      "ssl.server.keystore.password";
+  public static final String SSL_SERVER_KEYSTORE_TYPE =
+      "ssl.server.keystore.type";
+  public static final String SSL_SERVER_KEYSTORE_TYPE_DEFAULT = "jks";
+  public static final String SSL_SERVER_KEYSTORE_KEYPASSWORD =
+      "ssl.server.keystore.keypassword";
+
+  public static final String SSL_SERVER_TRUSTSTORE_LOCATION =
+      "ssl.server.truststore.location";
+  public static final String SSL_SERVER_TRUSTSTORE_PASSWORD =
+      "ssl.server.truststore.password";
+  public static final String SSL_SERVER_TRUSTSTORE_TYPE =
+      "ssl.server.truststore.type";
+  public static final String SSL_SERVER_TRUSTSTORE_TYPE_DEFAULT = "jks";
+
+  public static final String SSL_SERVER_EXCLUDE_CIPHER_LIST =
+      "ssl.server.exclude.cipher.list";
 
-  public static final boolean DEFAULT_SSL_REQUIRE_CLIENT_CERT = false;
+  @Deprecated
+  public static final boolean DEFAULT_SSL_REQUIRE_CLIENT_CERT =
+      SSL_REQUIRE_CLIENT_CERT_DEFAULT;
+
+  public static final String SSLCERTIFICATE = IBM_JAVA?"ibmX509":"SunX509";
 
   public static final String KEYSTORES_FACTORY_CLASS_KEY =
     "hadoop.ssl.keystores.factory.class";
 
+  @Deprecated
   public static final String SSL_ENABLED_PROTOCOLS =
-      "hadoop.ssl.enabled.protocols";
+      SSL_ENABLED_PROTOCOLS_KEY;
+  @Deprecated
   public static final String DEFAULT_SSL_ENABLED_PROTOCOLS =
-      "TLSv1,SSLv2Hello,TLSv1.1,TLSv1.2";
-  public static final String SSL_SERVER_EXCLUDE_CIPHER_LIST =
-      "ssl.server.exclude.cipher.list";
+      SSL_ENABLED_PROTOCOLS_DEFAULT;
 
   private Configuration conf;
   private Mode mode;
@@ -131,9 +165,11 @@ public class SSLFactory implements ConnectionConfigurator {
     sslConf.setBoolean(SSL_REQUIRE_CLIENT_CERT_KEY, requireClientCert);
     String sslConfResource;
     if (mode == Mode.CLIENT) {
-      sslConfResource = conf.get(SSL_CLIENT_CONF_KEY, "ssl-client.xml");
+      sslConfResource = conf.get(SSL_CLIENT_CONF_KEY,
+          SSL_CLIENT_CONF_DEFAULT);
     } else {
-      sslConfResource = conf.get(SSL_SERVER_CONF_KEY, "ssl-server.xml");
+      sslConfResource = conf.get(SSL_SERVER_CONF_KEY,
+          SSL_SERVER_CONF_DEFAULT);
     }
     sslConf.addResource(sslConfResource);
     return sslConf;
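The new SSL_SERVER_* constants give callers symbolic names for the ssl-server.xml properties that were previously hard-coded. A small sketch of reading them (the demo class is hypothetical; values depend on the local ssl-server.xml):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ssl.SSLFactory;

public class SslServerConfSketch {
  public static void main(String[] args) {
    // Load only the server-side SSL resource (default: ssl-server.xml).
    Configuration sslConf = new Configuration(false);
    sslConf.addResource(SSLFactory.SSL_SERVER_CONF_DEFAULT);

    String location = sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_LOCATION);
    String type = sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_TYPE,
        SSLFactory.SSL_SERVER_KEYSTORE_TYPE_DEFAULT);  // "jks"
    System.out.println("keystore: " + location + " (" + type + ")");
  }
}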

+ 6 - 2
hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md

@@ -207,6 +207,12 @@ NOTE: Some KeyProviders (e.g. org.apache.hadoop.crypto.key.JavaKeyStoreProvider)
 
 NOTE: Some KeyProviders do not directly execute a key deletion (e.g. performs a soft-delete instead, or delay the actual deletion, to prevent mistake). In these cases, one may encounter errors when creating/deleting a key with the same name after deleting it. Please check the underlying KeyProvider for details.
 
+### `kms`
+
+Usage: `hadoop kms`
+
+Run KMS, the Key Management Server.
+
 ### `trace`
 
 View and modify Hadoop tracing settings. See the [Tracing Guide](./Tracing.html).
@@ -267,8 +273,6 @@ This command works by sending a HTTP/HTTPS request to the daemon's internal Jett
     * node manager
     * Timeline server
 
-However, the command does not support KMS server, because its web interface is based on Tomcat, which does not support the servlet.
 
 Files
 -----
 -----

+ 42 - 0
hadoop-common-project/hadoop-common/src/test/scripts/hadoop_mkdir.bats

@@ -0,0 +1,42 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_mkdir (create)" {
+  DIR=${BATS_TMPDIR}/nodir
+  rm -fr ${DIR}
+  run hadoop_mkdir ${DIR}
+  [ "${status}" = 0 ]
+  [ "${output}" = "WARNING: ${DIR} does not exist. Creating." ]
+}
+
+
+@test "hadoop_mkdir (exists)" {
+  DIR=${BATS_TMPDIR}/exists
+  mkdir -p ${DIR}
+  run hadoop_mkdir ${DIR}
+  [ "${status}" = 0 ]
+  [ -z "${output}" ]
+}
+
+
+@test "hadoop_mkdir (failed)" {
+  DIR=${BATS_TMPDIR}/readonly_dir/dir
+  mkdir -p ${BATS_TMPDIR}/readonly_dir
+  chmod a-w ${BATS_TMPDIR}/readonly_dir
+  run hadoop_mkdir ${DIR}
+  [ "${status}" != 0 ]
+}

+ 33 - 0
hadoop-common-project/hadoop-common/src/test/scripts/hadoop_using_envvar.bats

@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_using_envvar (has value)" {
+  HADOOP_SHELL_SCRIPT_DEBUG=true
+  VAR=value
+  run hadoop_using_envvar VAR
+  [ "${status}" = 0 ]
+  [ "${output}" = "DEBUG: VAR = value" ]
+}
+
+
+@test "hadoop_using_envvar (no value)" {
+  HADOOP_SHELL_SCRIPT_DEBUG=true
+  VAR=
+  run hadoop_using_envvar VAR
+  [ "${status}" = 0 ]
+  [ -z "${output}" ]
+}

+ 1 - 1
hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml

@@ -39,7 +39,7 @@
     <Bug pattern="DM_EXIT"/>
   </Match>
   <!--
-    KMS wants to log the exception before it's thrown to tomcat and disappear.
+    KMS wants to log the exception before it's thrown to Jetty and disappear.
   -->
   <Match>
     <Class name="org.apache.hadoop.crypto.key.kms.server.KMS"/>

+ 11 - 149
hadoop-common-project/hadoop-kms/pom.xml

@@ -27,20 +27,11 @@
   </parent>
   <artifactId>hadoop-kms</artifactId>
   <version>3.0.0-alpha2-SNAPSHOT</version>
-  <packaging>war</packaging>
+  <packaging>jar</packaging>
 
   <name>Apache Hadoop KMS</name>
   <description>Apache Hadoop KMS</description>
 
-  <properties>
-    <kms.tomcat.dist.dir>
-      ${project.build.directory}/${project.artifactId}-${project.version}/share/hadoop/kms/tomcat
-    </kms.tomcat.dist.dir>
-    <tomcat.download.url>
-      http://archive.apache.org/dist/tomcat/tomcat-6/v${tomcat.version}/bin/apache-tomcat-${tomcat.version}.tar.gz
-    </tomcat.download.url>
-  </properties>
-
   <dependencies>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -80,12 +71,14 @@
     <dependency>
       <groupId>javax.servlet</groupId>
       <artifactId>javax.servlet-api</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
-      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-webapp</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -100,14 +93,6 @@
           <groupId>commons-httpclient</groupId>
           <artifactId>commons-httpclient</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-compiler</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-runtime</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>javax.servlet</groupId>
           <artifactId>javax.servlet-api</artifactId>
@@ -229,66 +214,21 @@
         <artifactId>maven-antrun-plugin</artifactId>
         <executions>
           <execution>
-            <id>create-web-xmls</id>
-            <phase>generate-test-resources</phase>
+            <id>site</id>
+            <phase>site</phase>
             <goals>
               <goal>run</goal>
             </goals>
             <configuration>
               <target>
-                <mkdir dir="${project.build.directory}/test-classes/kms-webapp"/>
-
-                <copy todir="${project.build.directory}/test-classes/kms-webapp">
-                  <fileset dir="${basedir}/src/main/webapp"/>
-                </copy>
+                <xslt in="${basedir}/src/main/resources/kms-default.xml"
+                      out="${project.build.directory}/site/kms-default.html"
+                      style="${basedir}/src/site/configuration.xsl"/>
               </target>
             </configuration>
           </execution>
         </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-war-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>default-war</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>war</goal>
-            </goals>
-            <configuration>
-              <archiveClasses>true</archiveClasses>
-              <warName>kms</warName>
-              <webappDirectory>${project.build.directory}/kms
-              </webappDirectory>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>prepare-jar</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>jar</goal>
-            </goals>
-            <configuration>
-              <classifier>classes</classifier>
-            </configuration>
-          </execution>
-          <execution>
-            <id>prepare-test-jar</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
+      </plugin>      <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>findbugs-maven-plugin</artifactId>
         <configuration>
@@ -360,84 +300,6 @@
               </execution>
             </executions>
           </plugin>
-          <!-- Downloading Tomcat TAR.GZ, using downloads/ dir to avoid downloading over an over -->
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>dist</id>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-                <phase>package</phase>
-                <configuration>
-                  <target>
-                    <mkdir dir="downloads"/>
-                    <get
-                      src="${tomcat.download.url}"
-                      dest="downloads/apache-tomcat-${tomcat.version}.tar.gz"
-                      verbose="true" skipexisting="true"/>
-                    <delete dir="${project.build.directory}/tomcat.exp"/>
-                    <mkdir dir="${project.build.directory}/tomcat.exp"/>
-
-                    <!-- Using Unix script to preserve file permissions -->
-                    <echo file="${project.build.directory}/tomcat-untar.sh">
-                      cd "${project.build.directory}/tomcat.exp"
-                      gzip -cd ../../downloads/apache-tomcat-${tomcat.version}.tar.gz | tar xf -
-                    </echo>
-                    <exec executable="${shell-executable}" dir="${project.build.directory}"
-                          failonerror="true">
-                      <arg line="./tomcat-untar.sh"/>
-                    </exec>
-
-                    <move
-                      file="${project.build.directory}/tomcat.exp/apache-tomcat-${tomcat.version}"
-                      tofile="${kms.tomcat.dist.dir}"/>
-                    <delete dir="${project.build.directory}/tomcat.exp"/>
-                    <delete dir="${kms.tomcat.dist.dir}/webapps"/>
-                    <mkdir dir="${kms.tomcat.dist.dir}/webapps"/>
-                    <delete file="${kms.tomcat.dist.dir}/conf/server.xml"/>
-                    <copy file="${basedir}/src/main/tomcat/server.xml"
-                          toDir="${kms.tomcat.dist.dir}/conf"/>
-                    <delete file="${kms.tomcat.dist.dir}/conf/ssl-server.xml.conf"/>
-                    <copy file="${basedir}/src/main/tomcat/ssl-server.xml.conf"
-                          toDir="${kms.tomcat.dist.dir}/conf"/>
-                    <delete
-                      file="${kms.tomcat.dist.dir}/conf/logging.properties"/>
-                    <copy file="${basedir}/src/main/tomcat/logging.properties"
-                          toDir="${kms.tomcat.dist.dir}/conf"/>
-                    <copy toDir="${kms.tomcat.dist.dir}/webapps/ROOT">
-                      <fileset dir="${basedir}/src/main/tomcat/ROOT"/>
-                    </copy>
-                    <copy toDir="${kms.tomcat.dist.dir}/webapps/kms">
-                      <fileset dir="${project.build.directory}/kms"/>
-                    </copy>
-                  </target>
-                </configuration>
-              </execution>
-              <execution>
-                <id>tar</id>
-                <phase>package</phase>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-                <configuration>
-                  <target if="tar">
-                    <!-- Using Unix script to preserve symlinks -->
-                    <echo file="${project.build.directory}/dist-maketar.sh">
-                      cd "${project.build.directory}"
-                      tar cf - ${project.artifactId}-${project.version} | gzip > ${project.artifactId}-${project.version}.tar.gz
-                    </echo>
-                    <exec executable="${shell-executable}" dir="${project.build.directory}"
-                          failonerror="true">
-                      <arg line="./dist-maketar.sh"/>
-                    </exec>
-                  </target>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
         </plugins>
       </build>
     </profile>

+ 15 - 33
hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh

@@ -18,6 +18,14 @@
 # hadoop-env.sh is read prior to this file.
 #
 
+# KMS config directory
+#
+# export KMS_CONFIG=${HADOOP_CONF_DIR}
+
+# KMS log directory
+#
+# export KMS_LOG=${HADOOP_LOG_DIR}
+
 # KMS temporary directory
 #
 # export KMS_TEMP=${HADOOP_HOME}/temp
@@ -26,48 +34,22 @@
 #
 # export KMS_HTTP_PORT=9600
 
-# The Admin port used by KMS
-#
-# export KMS_ADMIN_PORT=$((KMS_HTTP_PORT + 1))
-
-# The maximum number of Tomcat handler threads
+# The maximum number of HTTP handler threads
 #
 # export KMS_MAX_THREADS=1000
 
-# The maximum size of Tomcat HTTP header
+# The maximum size of HTTP header
 #
 # export KMS_MAX_HTTP_HEADER_SIZE=65536
 
+# Whether SSL is enabled
+#
+# export KMS_SSL_ENABLED=false
+
 # The location of the SSL keystore if using SSL
 #
 # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
 
-#
 # The password of the SSL keystore if using SSL
 #
-# export KMS_SSL_KEYSTORE_PASS=password
-
-
-##
-## Tomcat specific settings
-##
-#
-# Location of tomcat
-#
-# export KMS_CATALINA_HOME=${HADOOP_HOME}/share/hadoop/kms/tomcat
-
-# Java System properties for KMS should be specified in this variable.
-# The java.library.path and hadoop.home.dir properties are automatically
-# configured.  In order to supplement java.library.path,
-# one should add to the JAVA_LIBRARY_PATH env var.
-#
-# export CATALINA_OPTS=
-
-# PID file
-#
-# export CATALINA_PID=${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-kms.pid
-
-# Output file
-#
-# export CATALINA_OUT=${KMS_LOG}/hadoop-${HADOOP_IDENT_STRING}-kms-${HOSTNAME}.out
-
+# export KMS_SSL_KEYSTORE_PASS=password

+ 1 - 2
hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties

@@ -32,7 +32,6 @@ log4j.appender.kms-audit.layout.ConversionPattern=%d{ISO8601} %m%n
 log4j.logger.kms-audit=INFO, kms-audit
 log4j.additivity.kms-audit=false
 
-log4j.rootLogger=ALL, kms
-log4j.logger.org.apache.hadoop.conf=ERROR
+log4j.rootLogger=INFO, kms
 log4j.logger.org.apache.hadoop=INFO
 log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF

+ 2 - 165
hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml

@@ -12,172 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<configuration>
-
-  <!-- KMS Backend KeyProvider -->
-
-  <property>
-    <name>hadoop.kms.key.provider.uri</name>
-    <value>jceks://file@/${user.home}/kms.keystore</value>
-    <description>
-      URI of the backing KeyProvider for the KMS.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.security.keystore.java-keystore-provider.password-file</name>
-    <value>kms.keystore.password</value>
-    <description>
-      If using the JavaKeyStoreProvider, the file name for the keystore password.
-    </description>
-  </property>
-
-  <!-- KMS Cache -->
-
-  <property>
-    <name>hadoop.kms.cache.enable</name>
-    <value>true</value>
-    <description>
-      Whether the KMS will act as a cache for the backing KeyProvider.
-      When the cache is enabled, operations like getKeyVersion, getMetadata,
-      and getCurrentKey will sometimes return cached data without consulting
-      the backing KeyProvider. Cached values are flushed when keys are deleted
-      or modified.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.cache.timeout.ms</name>
-    <value>600000</value>
-    <description>
-      Expiry time for the KMS key version and key metadata cache, in
-      milliseconds. This affects getKeyVersion and getMetadata.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.current.key.cache.timeout.ms</name>
-    <value>30000</value>
-    <description>
-      Expiry time for the KMS current key cache, in milliseconds. This
-      affects getCurrentKey operations.
-    </description>
-  </property>
-
-  <!-- KMS Audit -->
-
-  <property>
-    <name>hadoop.kms.audit.aggregation.window.ms</name>
-    <value>10000</value>
-    <description>
-      Duplicate audit log events within the aggregation window (specified in
-      ms) are quashed to reduce log traffic. A single message for aggregated
-      events is printed at the end of the window, along with a count of the
-      number of aggregated events.
-    </description>
-  </property>
-
-  <!-- KMS Security -->
-
-  <property>
-    <name>hadoop.kms.authentication.type</name>
-    <value>simple</value>
-    <description>
-      Authentication type for the KMS. Can be either &quot;simple&quot;
-      or &quot;kerberos&quot;.
-    </description>
-  </property>
 
-  <property>
-    <name>hadoop.kms.authentication.kerberos.keytab</name>
-    <value>${user.home}/kms.keytab</value>
-    <description>
-      Path to the keytab with credentials for the configured Kerberos principal.
-    </description>
-  </property>
+<!-- Put site-specific property overrides in this file. -->
 
-  <property>
-    <name>hadoop.kms.authentication.kerberos.principal</name>
-    <value>HTTP/localhost</value>
-    <description>
-      The Kerberos principal to use for the HTTP endpoint.
-      The principal must start with 'HTTP/' as per the Kerberos HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.kerberos.name.rules</name>
-    <value>DEFAULT</value>
-    <description>
-      Rules used to resolve Kerberos principal names.
-    </description>
-  </property>
-
-  <!-- Authentication cookie signature source -->
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider</name>
-    <value>random</value>
-    <description>
-      Indicates how the secret to sign the authentication cookies will be
-      stored. Options are 'random' (default), 'string' and 'zookeeper'.
-      If using a setup with multiple KMS instances, 'zookeeper' should be used.
-    </description>
-  </property>
-
-  <!-- Configuration for 'zookeeper' authentication cookie signature source -->
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.path</name>
-    <value>/hadoop-kms/hadoop-auth-signature-secret</value>
-    <description>
-      The Zookeeper ZNode path where the KMS instances will store and retrieve
-      the secret from.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string</name>
-    <value>#HOSTNAME#:#PORT#,...</value>
-    <description>
-      The Zookeeper connection string, a list of hostnames and port comma
-      separated.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type</name>
-    <value>none</value>
-    <description>
-      The Zookeeper authentication type, 'none' (default) or 'sasl' (Kerberos).
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab</name>
-    <value>/etc/hadoop/conf/kms.keytab</value>
-    <description>
-      The absolute path for the Kerberos keytab with the credentials to
-      connect to Zookeeper.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal</name>
-    <value>kms/#HOSTNAME#</value>
-    <description>
-      The Kerberos service principal used to connect to Zookeeper.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.audit.logger</name>
-    <value>org.apache.hadoop.crypto.key.kms.server.SimpleKMSAuditLogger</value>
-    <description>
-      The audit logger for KMS. It is a comma-separated list of KMSAuditLogger
-      class names. Default is the text-format SimpleKMSAuditLogger only.
-      If this is not configured, default will be used.
-    </description>
-  </property>
+<configuration>
 
 </configuration>

+ 16 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java

@@ -32,6 +32,7 @@ import java.net.URL;
 public class KMSConfiguration {
 
   public static final String KMS_CONFIG_DIR = "kms.config.dir";
+  public static final String KMS_DEFAULT_XML = "kms-default.xml";
   public static final String KMS_SITE_XML = "kms-site.xml";
   public static final String KMS_ACLS_XML = "kms-acls.xml";
 
@@ -42,6 +43,16 @@ public class KMSConfiguration {
   public static final String DEFAULT_KEY_ACL_PREFIX = "default.key.acl.";
   public static final String WHITELIST_KEY_ACL_PREFIX = "whitelist.key.acl.";
 
+  // HTTP properties
+  public static final String HTTP_PORT_KEY = "hadoop.kms.http.port";
+  public static final int HTTP_PORT_DEFAULT = 9600;
+  public static final String HTTP_HOST_KEY = "hadoop.kms.http.host";
+  public static final String HTTP_HOST_DEFAULT = "0.0.0.0";
+
+  // SSL properties
+  public static final String SSL_ENABLED_KEY = "hadoop.kms.ssl.enabled";
+  public static final boolean SSL_ENABLED_DEFAULT = false;
+
   // Property to set the backing KeyProvider
   public static final String KEY_PROVIDER_URI = CONFIG_PREFIX +
       "key.provider.uri";
@@ -77,6 +88,11 @@ public class KMSConfiguration {
 
   public static final boolean KEY_AUTHORIZATION_ENABLE_DEFAULT = true;
 
+  static {
+    Configuration.addDefaultResource(KMS_DEFAULT_XML);
+    Configuration.addDefaultResource(KMS_SITE_XML);
+  }
+
   static Configuration getConfiguration(boolean loadHadoopDefaults,
       String ... resources) {
     Configuration conf = new Configuration(loadHadoopDefaults);

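The new static block registers `kms-default.xml` and `kms-site.xml` as default resources, so any `Configuration` created after `KMSConfiguration` is loaded resolves KMS properties automatically. A minimal sketch of that effect (the probe class is illustrative, not part of the patch; it assumes the hadoop-kms jar, and therefore kms-default.xml, is on the classpath):

```java
package org.apache.hadoop.crypto.key.kms.server;

import org.apache.hadoop.conf.Configuration;

public class KmsDefaultsProbe {
  public static void main(String[] args) throws Exception {
    // Force the static initializer to run; referencing only a
    // compile-time constant field would be inlined by javac and
    // would not initialize the class.
    Class.forName("org.apache.hadoop.crypto.key.kms.server.KMSConfiguration");

    // loadDefaults=true consults every registered default resource:
    // kms-default.xml first, then kms-site.xml overrides it.
    Configuration conf = new Configuration(true);
    // Prints 9600 unless kms-site.xml overrides hadoop.kms.http.port.
    System.out.println(conf.getInt(KMSConfiguration.HTTP_PORT_KEY, -1));
  }
}
```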
+ 0 - 36
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJMXServlet.java

@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.crypto.key.kms.server;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.jmx.JMXJsonServlet;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import java.io.IOException;
-
-@InterfaceAudience.Private
-public class KMSJMXServlet extends JMXJsonServlet {
-
-  @Override
-  protected boolean isInstrumentationAccessAllowed(HttpServletRequest request,
-      HttpServletResponse response) throws IOException {
-    return true;
-  }
-}

+ 8 - 15
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java

@@ -17,10 +17,17 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;
 
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URL;
+
+import javax.servlet.ServletContextEvent;
+import javax.servlet.ServletContextListener;
+
 import com.codahale.metrics.JmxReporter;
 import com.codahale.metrics.Meter;
 import com.codahale.metrics.MetricRegistry;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.CachingKeyProvider;
@@ -34,15 +41,6 @@ import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.slf4j.bridge.SLF4JBridgeHandler;
-
-import javax.servlet.ServletContextEvent;
-import javax.servlet.ServletContextListener;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URL;
 
 @InterfaceAudience.Private
 public class KMSWebApp implements ServletContextListener {
@@ -81,11 +79,6 @@ public class KMSWebApp implements ServletContextListener {
   private static KMSAudit kmsAudit;
   private static KeyProviderCryptoExtension keyProviderCryptoExtension;
 
-  static {
-    SLF4JBridgeHandler.removeHandlersForRootLogger();
-    SLF4JBridgeHandler.install();
-  }
-
   private void initLogging(String confDir) {
     if (System.getProperty("log4j.configuration") == null) {
       System.setProperty("log4j.defaultInitOverride", "true");

+ 155 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java

@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URL;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.ConfigurationWithLogging;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The KMS web server.
+ */
+@InterfaceAudience.Private
+public class KMSWebServer {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(KMSWebServer.class);
+
+  private static final String NAME = "kms";
+  private static final String SERVLET_PATH = "/kms";
+
+  private final HttpServer2 httpServer;
+  private final String scheme;
+
+  KMSWebServer(Configuration cnf) throws Exception {
+    ConfigurationWithLogging conf = new ConfigurationWithLogging(cnf);
+
+    // Add SSL configuration file
+    conf.addResource(conf.get(SSLFactory.SSL_SERVER_CONF_KEY,
+        SSLFactory.SSL_SERVER_CONF_DEFAULT));
+
+    // Override configuration with deprecated environment variables.
+    deprecateEnv("KMS_TEMP", conf, HttpServer2.HTTP_TEMP_DIR_KEY,
+        KMSConfiguration.KMS_SITE_XML);
+    deprecateEnv("KMS_HTTP_PORT", conf,
+        KMSConfiguration.HTTP_PORT_KEY, KMSConfiguration.KMS_SITE_XML);
+    deprecateEnv("KMS_MAX_THREADS", conf,
+        HttpServer2.HTTP_MAX_THREADS_KEY, KMSConfiguration.KMS_SITE_XML);
+    deprecateEnv("KMS_MAX_HTTP_HEADER_SIZE", conf,
+        HttpServer2.HTTP_MAX_REQUEST_HEADER_SIZE_KEY,
+        KMSConfiguration.KMS_SITE_XML);
+    deprecateEnv("KMS_MAX_HTTP_HEADER_SIZE", conf,
+        HttpServer2.HTTP_MAX_RESPONSE_HEADER_SIZE_KEY,
+        KMSConfiguration.KMS_SITE_XML);
+    deprecateEnv("KMS_SSL_ENABLED", conf,
+        KMSConfiguration.SSL_ENABLED_KEY, KMSConfiguration.KMS_SITE_XML);
+    deprecateEnv("KMS_SSL_KEYSTORE_FILE", conf,
+        SSLFactory.SSL_SERVER_KEYSTORE_LOCATION,
+        SSLFactory.SSL_SERVER_CONF_DEFAULT);
+    deprecateEnv("KMS_SSL_KEYSTORE_PASS", conf,
+        SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD,
+        SSLFactory.SSL_SERVER_CONF_DEFAULT);
+
+    boolean sslEnabled = conf.getBoolean(KMSConfiguration.SSL_ENABLED_KEY,
+        KMSConfiguration.SSL_ENABLED_DEFAULT);
+    scheme = sslEnabled ? HttpServer2.HTTPS_SCHEME : HttpServer2.HTTP_SCHEME;
+
+    String host = conf.get(KMSConfiguration.HTTP_HOST_KEY,
+        KMSConfiguration.HTTP_HOST_DEFAULT);
+    int port = conf.getInt(KMSConfiguration.HTTP_PORT_KEY,
+        KMSConfiguration.HTTP_PORT_DEFAULT);
+    URI endpoint = new URI(scheme, null, host, port, null, null, null);
+
+    httpServer = new HttpServer2.Builder()
+        .setName(NAME)
+        .setConf(conf)
+        .setSSLConf(conf)
+        .authFilterConfigurationPrefix(KMSAuthenticationFilter.CONFIG_PREFIX)
+        .addEndpoint(endpoint)
+        .build();
+  }
+
+  /**
+   * Load the deprecated environment variable into the configuration.
+   *
+   * @param varName the environment variable name
+   * @param conf the configuration
+   * @param propName the configuration property name
+   * @param confFile the configuration file name
+   */
+  private static void deprecateEnv(String varName, Configuration conf,
+                                   String propName, String confFile) {
+    String value = System.getenv(varName);
+    if (value == null) {
+      return;
+    }
+    String propValue = conf.get(propName);
+    LOG.warn("Environment variable {} = '{}' is deprecated and overriding"
+        + " property {} = '{}', please set the property in {} instead.",
+        varName, value, propName, propValue, confFile);
+    conf.set(propName, value, "environment variable " + varName);
+  }
+
+  public void start() throws IOException {
+    httpServer.start();
+  }
+
+  public boolean isRunning() {
+    return httpServer.isAlive();
+  }
+
+  public void join() throws InterruptedException {
+    httpServer.join();
+  }
+
+  public void stop() throws Exception {
+    httpServer.stop();
+  }
+
+  public URL getKMSUrl() {
+    InetSocketAddress addr = httpServer.getConnectorAddress(0);
+    if (null == addr) {
+      return null;
+    }
+    try {
+      return new URL(scheme, addr.getHostName(), addr.getPort(),
+          SERVLET_PATH);
+    } catch (MalformedURLException ex) {
+      throw new RuntimeException("It should never happen: " + ex.getMessage(),
+          ex);
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    StringUtils.startupShutdownMessage(KMSWebServer.class, args, LOG);
+    Configuration conf = KMSConfiguration.getKMSConf();
+    KMSWebServer kmsWebServer = new KMSWebServer(conf);
+    kmsWebServer.start();
+    kmsWebServer.join();
+  }
+}

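Because `KMSWebServer` wraps `HttpServer2`, a caller such as a test harness can now embed the KMS in-process instead of launching an external Tomcat. A hedged sketch, not a supported API: it sits in the same package because the constructor is package-private, and the config directory path is an assumption.

```java
package org.apache.hadoop.crypto.key.kms.server;

import java.net.URL;

import org.apache.hadoop.conf.Configuration;

public final class EmbeddedKmsSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: KMSConfiguration locates kms-site.xml and friends
    // under the directory named by this system property.
    System.setProperty(KMSConfiguration.KMS_CONFIG_DIR, "/etc/hadoop/conf");

    Configuration conf = KMSConfiguration.getKMSConf();
    KMSWebServer kms = new KMSWebServer(conf);
    kms.start();
    // With the shipped defaults: http://0.0.0.0:9600/kms
    URL url = kms.getKMSUrl();
    System.out.println("KMS running=" + kms.isRunning() + " at " + url);
    kms.stop();
  }
}
```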
+ 0 - 72
hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh

@@ -1,72 +0,0 @@
-#!/usr/bin/env bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
-
-function hadoop_subproject_init
-{
-  local this
-  local binparent
-  local varlist
-
-  if [[ -z "${HADOOP_KMS_ENV_PROCESSED}" ]]; then
-    if [[ -e "${HADOOP_CONF_DIR}/kms-env.sh" ]]; then
-      . "${HADOOP_CONF_DIR}/kms-env.sh"
-      export HADOOP_KMS_ENV_PROCESSED=true
-    fi
-  fi
-
-  export HADOOP_CATALINA_PREFIX=kms
-
-  export HADOOP_CATALINA_TEMP="${KMS_TEMP:-${HADOOP_HOME}/temp}"
-
-  hadoop_deprecate_envvar KMS_CONFIG HADOOP_CONF_DIR
-
-  hadoop_deprecate_envvar KMS_LOG HADOOP_LOG_DIR
-
-  export HADOOP_CATALINA_CONFIG="${HADOOP_CONF_DIR}"
-  export HADOOP_CATALINA_LOG="${HADOOP_LOG_DIR}"
-
-  export HADOOP_CATALINA_HTTP_PORT="${KMS_HTTP_PORT:-9600}"
-  export HADOOP_CATALINA_ADMIN_PORT="${KMS_ADMIN_PORT:-$((HADOOP_CATALINA_HTTP_PORT+1))}"
-  export HADOOP_CATALINA_MAX_THREADS="${KMS_MAX_THREADS:-1000}"
-  export HADOOP_CATALINA_MAX_HTTP_HEADER_SIZE="${KMS_MAX_HTTP_HEADER_SIZE:-65536}"
-
-  export HADOOP_CATALINA_SSL_KEYSTORE_FILE="${KMS_SSL_KEYSTORE_FILE:-${HOME}/.keystore}"
-
-  export CATALINA_BASE="${CATALINA_BASE:-${HADOOP_HOME}/share/hadoop/kms/tomcat}"
-  export HADOOP_CATALINA_HOME="${KMS_CATALINA_HOME:-${CATALINA_BASE}}"
-
-  export CATALINA_OUT="${CATALINA_OUT:-${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-kms-${HOSTNAME}.out}"
-
-  export CATALINA_PID="${CATALINA_PID:-${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-kms.pid}"
-
-  if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then
-    varlist=$(env | egrep '(^KMS|^CATALINA)' | cut -f1 -d= | grep -v _PASS)
-    for i in ${varlist}; do
-      hadoop_debug "Setting ${i} to ${!i}"
-    done
-  fi
-}
-
-if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
-   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
-  . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
-elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
-  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
-elif [[ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]]; then
-  . "${HADOOP_HOME}/libexec/hadoop-config.sh"
-else
-  echo "ERROR: Hadoop common not found." 2>&1
-  exit 1
-fi

+ 57 - 0
hadoop-common-project/hadoop-kms/src/main/libexec/shellprofile.d/hadoop-kms.sh

@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
+  hadoop_add_subcommand "kms" "run KMS, the Key Management Server"
+fi
+
+## @description  Command handler for kms subcommand
+## @audience     private
+## @stability    stable
+## @replaceable  no
+function hadoop_subcommand_kms
+{
+  if [[ -f "${HADOOP_CONF_DIR}/kms-env.sh" ]]; then
+    # shellcheck disable=SC1090
+    . "${HADOOP_CONF_DIR}/kms-env.sh"
+  fi
+
+  hadoop_deprecate_envvar KMS_CONFIG HADOOP_CONF_DIR
+  hadoop_deprecate_envvar KMS_LOG HADOOP_LOG_DIR
+
+  hadoop_using_envvar KMS_HTTP_PORT
+  hadoop_using_envvar KMS_MAX_HTTP_HEADER_SIZE
+  hadoop_using_envvar KMS_MAX_THREADS
+  hadoop_using_envvar KMS_SSL_ENABLED
+  hadoop_using_envvar KMS_SSL_KEYSTORE_FILE
+  hadoop_using_envvar KMS_TEMP
+
+  # shellcheck disable=SC2034
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION=true
+  # shellcheck disable=SC2034
+  HADOOP_CLASSNAME=org.apache.hadoop.crypto.key.kms.server.KMSWebServer
+
+  hadoop_add_param HADOOP_OPTS "-Dkms.config.dir=" \
+    "-Dkms.config.dir=${HADOOP_CONF_DIR}"
+  hadoop_add_param HADOOP_OPTS "-Dkms.log.dir=" \
+    "-Dkms.log.dir=${HADOOP_LOG_DIR}"
+
+  if [[ "${HADOOP_DAEMON_MODE}" == "default" ]] ||
+     [[ "${HADOOP_DAEMON_MODE}" == "start" ]]; then
+    hadoop_mkdir "${KMS_TEMP:-${HADOOP_HOME}/temp}"
+  fi
+}

+ 248 - 0
hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml

@@ -0,0 +1,248 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!--
+  Do not modify this file directly.  Instead, copy entries that you wish to
+  modify from this file into kms-site.xml and change them there.  If
+  kms-site.xml does not already exist, create it.
+-->
+
+<configuration>
+
+  <property>
+    <name>hadoop.kms.http.port</name>
+    <value>9600</value>
+    <description>
+      The HTTP port for KMS REST API.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.http.host</name>
+    <value>0.0.0.0</value>
+    <description>
+      The bind host for KMS REST API.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.ssl.enabled</name>
+    <value>false</value>
+    <description>
+      Whether SSL is enabled. Default is false, i.e. disabled.
+    </description>
+  </property>
+
+  <!-- HTTP properties -->
+
+  <property>
+    <name>hadoop.http.max.threads</name>
+    <value>1000</value>
+    <description>
+      The maximum number of threads.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.http.max.request.header.size</name>
+    <value>65536</value>
+    <description>
+      The maximum HTTP request header size.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.http.max.response.header.size</name>
+    <value>65536</value>
+    <description>
+      The maximum HTTP response header size.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.http.temp.dir</name>
+    <value>${hadoop.tmp.dir}/kms</value>
+    <description>
+      KMS temp directory.
+    </description>
+  </property>
+
+  <!-- KMS Backend KeyProvider -->
+
+  <property>
+    <name>hadoop.kms.key.provider.uri</name>
+    <value>jceks://file@/${user.home}/kms.keystore</value>
+    <description>
+      URI of the backing KeyProvider for the KMS.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.security.keystore.java-keystore-provider.password-file</name>
+    <value></value>
+    <description>
+      If using the JavaKeyStoreProvider, the file name for the keystore password.
+    </description>
+  </property>
+
+  <!-- KMS Cache -->
+
+  <property>
+    <name>hadoop.kms.cache.enable</name>
+    <value>true</value>
+    <description>
+      Whether the KMS will act as a cache for the backing KeyProvider.
+      When the cache is enabled, operations like getKeyVersion, getMetadata,
+      and getCurrentKey will sometimes return cached data without consulting
+      the backing KeyProvider. Cached values are flushed when keys are deleted
+      or modified.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.cache.timeout.ms</name>
+    <value>600000</value>
+    <description>
+      Expiry time for the KMS key version and key metadata cache, in
+      milliseconds. This affects getKeyVersion and getMetadata.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.current.key.cache.timeout.ms</name>
+    <value>30000</value>
+    <description>
+      Expiry time for the KMS current key cache, in milliseconds. This
+      affects getCurrentKey operations.
+    </description>
+  </property>
+
+  <!-- KMS Audit -->
+
+  <property>
+    <name>hadoop.kms.audit.aggregation.window.ms</name>
+    <value>10000</value>
+    <description>
+      Duplicate audit log events within the aggregation window (specified in
+      ms) are quashed to reduce log traffic. A single message for aggregated
+      events is printed at the end of the window, along with a count of the
+      number of aggregated events.
+    </description>
+  </property>
+
+  <!-- KMS Security -->
+
+  <property>
+    <name>hadoop.kms.authentication.type</name>
+    <value>simple</value>
+    <description>
+      Authentication type for the KMS. Can be either 'simple' (default) or
+      'kerberos'.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.keytab</name>
+    <value>${user.home}/kms.keytab</value>
+    <description>
+      Path to the keytab with credentials for the configured Kerberos principal.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.principal</name>
+    <value>HTTP/localhost</value>
+    <description>
+      The Kerberos principal to use for the HTTP endpoint.
+      The principal must start with 'HTTP/' as per the Kerberos HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.name.rules</name>
+    <value>DEFAULT</value>
+    <description>
+      Rules used to resolve Kerberos principal names.
+    </description>
+  </property>
+
+  <!-- Authentication cookie signature source -->
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider</name>
+    <value>random</value>
+    <description>
+      Indicates how the secret to sign the authentication cookies will be
+      stored. Options are 'random' (default), 'string' and 'zookeeper'.
+      If using a setup with multiple KMS instances, 'zookeeper' should be used.
+    </description>
+  </property>
+
+  <!-- Configuration for 'zookeeper' authentication cookie signature source -->
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.path</name>
+    <value>/hadoop-kms/hadoop-auth-signature-secret</value>
+    <description>
+      The Zookeeper ZNode path where the KMS instances will store and retrieve
+      the secret from.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string</name>
+    <value>#HOSTNAME#:#PORT#,...</value>
+    <description>
+      The Zookeeper connection string: a comma-separated list of
+      host:port pairs.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type</name>
+    <value>none</value>
+    <description>
+      The Zookeeper authentication type, 'none' (default) or 'sasl' (Kerberos).
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab</name>
+    <value>/etc/hadoop/conf/kms.keytab</value>
+    <description>
+      The absolute path for the Kerberos keytab with the credentials to
+      connect to Zookeeper.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal</name>
+    <value>kms/#HOSTNAME#</value>
+    <description>
+      The Kerberos service principal used to connect to Zookeeper.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.audit.logger</name>
+    <value>org.apache.hadoop.crypto.key.kms.server.SimpleKMSAuditLogger</value>
+    <description>
+      The audit logger for KMS. It is a comma-separated list of KMSAuditLogger
+      class names. Default is the text-format SimpleKMSAuditLogger only.
+      If this is not configured, default will be used.
+    </description>
+  </property>
+
+</configuration>

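As the header comment in the new file says, overrides belong in kms-site.xml. A small sketch of the precedence rule this relies on (the port override is hypothetical; both files are assumed to be on the classpath):

```java
import org.apache.hadoop.conf.Configuration;

public class KmsOverrideCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // For the same key, a resource added later wins.
    conf.addResource("kms-default.xml");
    conf.addResource("kms-site.xml");
    // Prints the kms-site.xml value if one is set there, else 9600.
    System.out.println(conf.get("hadoop.kms.http.port"));
  }
}
```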
+ 1 - 11
hadoop-common-project/hadoop-kms/src/main/webapp/WEB-INF/web.xml → hadoop-common-project/hadoop-kms/src/main/resources/webapps/kms/WEB-INF/web.xml

@@ -40,19 +40,9 @@
     <load-on-startup>1</load-on-startup>
   </servlet>
 
-  <servlet>
-    <servlet-name>jmx-servlet</servlet-name>
-    <servlet-class>org.apache.hadoop.crypto.key.kms.server.KMSJMXServlet</servlet-class>
-  </servlet>
-
   <servlet-mapping>
     <servlet-name>webservices-driver</servlet-name>
-    <url-pattern>/*</url-pattern>
-  </servlet-mapping>
-
-  <servlet-mapping>
-    <servlet-name>jmx-servlet</servlet-name>
-    <url-pattern>/jmx</url-pattern>
+    <url-pattern>/kms/*</url-pattern>
   </servlet-mapping>
 
   <filter>

+ 10 - 2
hadoop-common-project/hadoop-kms/src/main/tomcat/ROOT/index.html → hadoop-common-project/hadoop-kms/src/main/resources/webapps/static/index.html

@@ -20,8 +20,16 @@
 <body>
 <h1>Hadoop KMS</h1>
 <ul>
-  <li>KMS REST API end-point <b>/kms/v1/*</b></li>
-  <li><a href="/kms/jmx">KMS JMX JSON end-point</a></li>
+  <li>KMS REST API end-point <b>/kms/v1/</b></li>
+    <ul>
+      <li><a href="/kms/v1/keys/names">/kms/v1/keys/names</a>
+        to list all keys</li>
+    </ul>
+  <li><a href="/conf">KMS configuration properties</a></li>
+  <li><a href="/jmx">KMS JMX</a></li>
+  <li><a href="/logLevel">KMS log level</a></li>
+  <li><a href="/logs">KMS log files</a></li>
+  <li><a href="/stacks">KMS stacks</a></li>
 </ul>
 </body>
 </html>

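The links on the new status page are live endpoints served by the embedded Jetty. A quick smoke test of one of them from Java; the host, port, and the `user.name` pseudo-authentication query parameter are assumptions for a default, non-Kerberos setup:

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class KmsKeyNamesCheck {
  public static void main(String[] args) throws Exception {
    // 'simple' authentication identifies the caller via ?user.name=...
    URL url = new URL(
        "http://localhost:9600/kms/v1/keys/names?user.name=kms");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    System.out.println("HTTP " + conn.getResponseCode());
    try (BufferedReader in = new BufferedReader(new InputStreamReader(
        conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // JSON array of key names
      }
    }
  }
}
```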
+ 38 - 78
hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh

@@ -13,92 +13,52 @@
 #  limitations under the License.
 #
 
-MYNAME="${BASH_SOURCE-$0}"
+MYNAME="${0##*/}"
 
-function hadoop_usage
+## @description  Print usage
+## @audience     private
+## @stability    stable
+## @replaceable  no
+function print_usage
 {
-  hadoop_add_subcommand "run" "Start kms in the current window"
-  hadoop_add_subcommand "run -security" "Start in the current window with security manager"
-  hadoop_add_subcommand "start" "Start kms in a separate window"
-  hadoop_add_subcommand "start -security" "Start in a separate window with security manager"
-  hadoop_add_subcommand "status" "Return the LSB compliant status"
-  hadoop_add_subcommand "stop" "Stop kms, waiting up to 5 seconds for the process to end"
-  hadoop_add_subcommand "top n" "Stop kms, waiting up to n seconds for the process to end"
-  hadoop_add_subcommand "stop -force" "Stop kms, wait up to 5 seconds and then use kill -KILL if still running"
-  hadoop_add_subcommand "stop n -force" "Stop kms, wait up to n seconds and then use kill -KILL if still running"
-  hadoop_generate_usage "${MYNAME}" false
+  cat <<EOF
+Usage: ${MYNAME} run|start|status|stop
+commands:
+  run     Run KMS, the Key Management Server
+  start   Start KMS as a daemon
+  status  Return the status of the KMS daemon
+  stop    Stop the KMS daemon
+EOF
 }
 
-# let's locate libexec...
-if [[ -n "${HADOOP_HOME}" ]]; then
-  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
-else
-  bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
-  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
-fi
-
-HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
-# shellcheck disable=SC2034
-HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/kms-config.sh" ]]; then
-  . "${HADOOP_LIBEXEC_DIR}/kms-config.sh"
-else
-  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/kms-config.sh." 2>&1
-  exit 1
-fi
-
-# The Java System property 'kms.http.port' it is not used by Kms,
-# it is used in Tomcat's server.xml configuration file
-#
-
-hadoop_debug "Using   CATALINA_OPTS:       ${CATALINA_OPTS}"
-
-# We're using hadoop-common, so set up some stuff it might need:
-hadoop_finalize
-
-hadoop_verify_logdir
+echo "WARNING: ${MYNAME} is deprecated," \
+  "please use 'hadoop [--daemon start|status|stop] kms'." >&2
 
 if [[ $# = 0 ]]; then
-  case "${HADOOP_DAEMON_MODE}" in
-    status)
-      hadoop_status_daemon "${CATALINA_PID}"
-      exit
-    ;;
-    start)
-      set -- "start"
-    ;;
-    stop)
-      set -- "stop"
-    ;;
-  esac
+  print_usage
+  exit
 fi
 
-hadoop_finalize_catalina_opts
-export CATALINA_OPTS
-
-# A bug in catalina.sh script does not use CATALINA_OPTS for stopping the server
-#
-if [[ "${1}" = "stop" ]]; then
-  export JAVA_OPTS=${CATALINA_OPTS}
-fi
+case $1 in
+  run)
+    args=("kms")
+  ;;
+  start|stop|status)
+    args=("--daemon" "$1" "kms")
+  ;;
+  *)
+    echo "Unknown sub-command \"$1\"."
+    print_usage
+    exit 1
+  ;;
+esac
 
-# If ssl, the populate the passwords into ssl-server.xml before starting tomcat
-#
-# KMS_SSL_KEYSTORE_PASS is a bit odd.
-# if undefined, then the if test will not enable ssl on its own
-# if "", set it to "password".
-# if custom, use provided password
-#
-if [[ -f "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml.conf" ]]; then
-  if [[ -n "${KMS_SSL_KEYSTORE_PASS+x}" ]]; then
-      export KMS_SSL_KEYSTORE_PASS=${KMS_SSL_KEYSTORE_PASS:-password}
-      KMS_SSL_KEYSTORE_PASS_ESCAPED=$(hadoop_xml_escape \
-        "$(hadoop_sed_escape "$KMS_SSL_KEYSTORE_PASS")")
-      sed -e 's/"_kms_ssl_keystore_pass_"/'"\"${KMS_SSL_KEYSTORE_PASS_ESCAPED}\""'/g' \
-        "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml.conf" \
-        > "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml"
-      chmod 700 "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml" >/dev/null 2>&1
-  fi
+# Locate bin
+if [[ -n "${HADOOP_HOME}" ]]; then
+  bin="${HADOOP_HOME}/bin"
+else
+  sbin=$(cd -P -- "$(dirname -- "$0")" >/dev/null && pwd -P)
+  bin=$(cd -P -- "${sbin}/../bin" >/dev/null && pwd -P)
 fi
 
-exec "${HADOOP_CATALINA_HOME}/bin/catalina.sh" "$@"
+exec "${bin}/hadoop" "${args[@]}"

+ 0 - 16
hadoop-common-project/hadoop-kms/src/main/tomcat/ROOT/WEB-INF/web.xml

@@ -1,16 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<web-app version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee">
-</web-app>

+ 0 - 67
hadoop-common-project/hadoop-kms/src/main/tomcat/logging.properties

@@ -1,67 +0,0 @@
-#
-#  All Rights Reserved.
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-handlers = 1catalina.org.apache.juli.FileHandler, 2localhost.org.apache.juli.FileHandler, 3manager.org.apache.juli.FileHandler, 4host-manager.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler
-
-.handlers = 1catalina.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler
-
-############################################################
-# Handler specific properties.
-# Describes specific configuration info for Handlers.
-############################################################
-
-1catalina.org.apache.juli.FileHandler.level = FINE
-1catalina.org.apache.juli.FileHandler.directory = ${kms.log.dir}
-1catalina.org.apache.juli.FileHandler.prefix = kms-catalina.
-
-2localhost.org.apache.juli.FileHandler.level = FINE
-2localhost.org.apache.juli.FileHandler.directory = ${kms.log.dir}
-2localhost.org.apache.juli.FileHandler.prefix = kms-localhost.
-
-3manager.org.apache.juli.FileHandler.level = FINE
-3manager.org.apache.juli.FileHandler.directory = ${kms.log.dir}
-3manager.org.apache.juli.FileHandler.prefix = kms-manager.
-
-4host-manager.org.apache.juli.FileHandler.level = FINE
-4host-manager.org.apache.juli.FileHandler.directory = ${kms.log.dir}
-4host-manager.org.apache.juli.FileHandler.prefix = kms-host-manager.
-
-java.util.logging.ConsoleHandler.level = FINE
-java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter
-
-
-############################################################
-# Facility specific properties.
-# Provides extra control for each logger.
-############################################################
-
-org.apache.catalina.core.ContainerBase.[Catalina].[localhost].level = INFO
-org.apache.catalina.core.ContainerBase.[Catalina].[localhost].handlers = 2localhost.org.apache.juli.FileHandler
-
-org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].level = INFO
-org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].handlers = 3manager.org.apache.juli.FileHandler
-
-org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].level = INFO
-org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].handlers = 4host-manager.org.apache.juli.FileHandler
-
-# For example, set the com.xyz.foo logger to only log SEVERE
-# messages:
-#org.apache.catalina.startup.ContextConfig.level = FINE
-#org.apache.catalina.startup.HostConfig.level = FINE
-#org.apache.catalina.session.ManagerBase.level = FINE
-#org.apache.catalina.core.AprLifecycleListener.level=FINE

+ 0 - 155
hadoop-common-project/hadoop-kms/src/main/tomcat/server.xml

@@ -1,155 +0,0 @@
-<?xml version='1.0' encoding='utf-8'?>
-<!--
-
-   All Rights Reserved.
-
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<!-- Note:  A "Server" is not itself a "Container", so you may not
-     define subcomponents such as "Valves" at this level.
-     Documentation at /docs/config/server.html
- -->
-<Server port="${kms.admin.port}" shutdown="SHUTDOWN">
-
-  <!--APR library loader. Documentation at /docs/apr.html -->
-  <Listener className="org.apache.catalina.core.AprLifecycleListener"
-            SSLEngine="on"/>
-  <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
-  <Listener className="org.apache.catalina.core.JasperListener"/>
-  <!-- Prevent memory leaks due to use of particular java/javax APIs-->
-  <Listener
-    className="org.apache.catalina.core.JreMemoryLeakPreventionListener"/>
-  <!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html -->
-  <Listener className="org.apache.catalina.mbeans.ServerLifecycleListener"/>
-  <Listener
-    className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener"/>
-
-  <!-- Global JNDI resources
-       Documentation at /docs/jndi-resources-howto.html
-  -->
-  <GlobalNamingResources>
-    <!-- Editable user database that can also be used by
-         UserDatabaseRealm to authenticate users
-    -->
-    <Resource name="UserDatabase" auth="Container"
-              type="org.apache.catalina.UserDatabase"
-              description="User database that can be updated and saved"
-              factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
-              pathname="conf/tomcat-users.xml"/>
-  </GlobalNamingResources>
-
-  <!-- A "Service" is a collection of one or more "Connectors" that share
-       a single "Container" Note:  A "Service" is not itself a "Container",
-       so you may not define subcomponents such as "Valves" at this level.
-       Documentation at /docs/config/service.html
-   -->
-  <Service name="Catalina">
-
-    <!--The connectors can use a shared executor, you can define one or more named thread pools-->
-    <!--
-    <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
-        maxThreads="150" minSpareThreads="4"/>
-    -->
-
-
-    <!-- A "Connector" represents an endpoint by which requests are received
-         and responses are returned. Documentation at :
-         Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)
-         Java AJP  Connector: /docs/config/ajp.html
-         APR (HTTP/AJP) Connector: /docs/apr.html
-         Define a non-SSL HTTP/1.1 Connector on port ${kms.http.port}
-    -->
-    <Connector port="${kms.http.port}" protocol="HTTP/1.1"
-               maxThreads="${kms.max.threads}"
-               connectionTimeout="20000"
-               redirectPort="8443"
-               maxHttpHeaderSize="${kms.max.http.header.size}"/>
-    <!-- A "Connector" using the shared thread pool-->
-    <!--
-    <Connector executor="tomcatThreadPool"
-               port="${kms.http.port}" protocol="HTTP/1.1"
-               connectionTimeout="20000"
-               redirectPort="8443" />
-    -->
-    <!-- Define a SSL HTTP/1.1 Connector on port 8443
-         This connector uses the JSSE configuration, when using APR, the
-         connector should be using the OpenSSL style configuration
-         described in the APR documentation -->
-    <!--
-    <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
-               maxThreads="150" scheme="https" secure="true"
-               clientAuth="false" sslProtocol="TLS" />
-    -->
-
-    <!-- Define an AJP 1.3 Connector on port 8009 -->
-
-
-    <!-- An Engine represents the entry point (within Catalina) that processes
- every request.  The Engine implementation for Tomcat stand alone
- analyzes the HTTP headers included with the request, and passes them
- on to the appropriate Host (virtual host).
- Documentation at /docs/config/engine.html -->
-
-    <!-- You should set jvmRoute to support load-balancing via AJP ie :
-    <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
-    -->
-    <Engine name="Catalina" defaultHost="localhost">
-
-      <!--For clustering, please take a look at documentation at:
-          /docs/cluster-howto.html  (simple how to)
-          /docs/config/cluster.html (reference documentation) -->
-      <!--
-      <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
-      -->
-
-      <!-- The request dumper valve dumps useful debugging information about
-           the request and response data received and sent by Tomcat.
-           Documentation at: /docs/config/valve.html -->
-      <!--
-      <Valve className="org.apache.catalina.valves.RequestDumperValve"/>
-      -->
-
-      <!-- This Realm uses the UserDatabase configured in the global JNDI
-           resources under the key "UserDatabase".  Any edits
-           that are performed against this UserDatabase are immediately
-           available for use by the Realm.  -->
-      <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
-             resourceName="UserDatabase"/>
-
-      <!-- Define the default virtual host
-           Note: XML Schema validation will not work with Xerces 2.2.
-       -->
-      <Host name="localhost" appBase="webapps"
-            unpackWARs="true" autoDeploy="true"
-            xmlValidation="false" xmlNamespaceAware="false">
-
-        <!-- SingleSignOn valve, share authentication between web applications
-             Documentation at: /docs/config/valve.html -->
-        <!--
-        <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
-        -->
-
-        <!-- Access log processes all example.
-             Documentation at: /docs/config/valve.html -->
-        <!--
-        <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
-               prefix="localhost_access_log." suffix=".txt" pattern="common" resolveHosts="false"/>
-        -->
-
-      </Host>
-    </Engine>
-  </Service>
-</Server>

+ 0 - 136
hadoop-common-project/hadoop-kms/src/main/tomcat/ssl-server.xml.conf

@@ -1,136 +0,0 @@
-<?xml version='1.0' encoding='utf-8'?>
-<!--
-
-   All Rights Reserved.
-
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<!-- Note:  A "Server" is not itself a "Container", so you may not
-     define subcomponents such as "Valves" at this level.
-     Documentation at /docs/config/server.html
- -->
-<Server port="${kms.admin.port}" shutdown="SHUTDOWN">
-
-  <!--APR library loader. Documentation at /docs/apr.html -->
-  <Listener className="org.apache.catalina.core.AprLifecycleListener"
-            SSLEngine="on"/>
-  <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
-  <Listener className="org.apache.catalina.core.JasperListener"/>
-  <!-- Prevent memory leaks due to use of particular java/javax APIs-->
-  <Listener
-    className="org.apache.catalina.core.JreMemoryLeakPreventionListener"/>
-  <!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html -->
-  <Listener className="org.apache.catalina.mbeans.ServerLifecycleListener"/>
-  <Listener
-    className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener"/>
-
-  <!-- Global JNDI resources
-       Documentation at /docs/jndi-resources-howto.html
-  -->
-  <GlobalNamingResources>
-    <!-- Editable user database that can also be used by
-         UserDatabaseRealm to authenticate users
-    -->
-    <Resource name="UserDatabase" auth="Container"
-              type="org.apache.catalina.UserDatabase"
-              description="User database that can be updated and saved"
-              factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
-              pathname="conf/tomcat-users.xml"/>
-  </GlobalNamingResources>
-
-  <!-- A "Service" is a collection of one or more "Connectors" that share
-       a single "Container" Note:  A "Service" is not itself a "Container",
-       so you may not define subcomponents such as "Valves" at this level.
-       Documentation at /docs/config/service.html
-   -->
-  <Service name="Catalina">
-
-    <!--The connectors can use a shared executor, you can define one or more named thread pools-->
-    <!--
-    <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
-        maxThreads="150" minSpareThreads="4"/>
-    -->
-
-    <!-- Define a SSL HTTP/1.1 Connector on port 8443
-         This connector uses the JSSE configuration, when using APR, the
-         connector should be using the OpenSSL style configuration
-         described in the APR documentation -->
-    <Connector port="${kms.http.port}" protocol="HTTP/1.1" SSLEnabled="true"
-               maxThreads="${kms.max.threads}" scheme="https" secure="true"
-               maxHttpHeaderSize="${kms.max.http.header.size}"
-               clientAuth="false" sslEnabledProtocols="TLSv1,TLSv1.1,TLSv1.2,SSLv2Hello"
-               keystoreFile="${kms.ssl.keystore.file}"
-               keystorePass="_kms_ssl_keystore_pass_"/>
-
-    <!-- Define an AJP 1.3 Connector on port 8009 -->
-
-
-    <!-- An Engine represents the entry point (within Catalina) that processes
- every request.  The Engine implementation for Tomcat stand alone
- analyzes the HTTP headers included with the request, and passes them
- on to the appropriate Host (virtual host).
- Documentation at /docs/config/engine.html -->
-
-    <!-- You should set jvmRoute to support load-balancing via AJP ie :
-    <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
-    -->
-    <Engine name="Catalina" defaultHost="localhost">
-
-      <!--For clustering, please take a look at documentation at:
-          /docs/cluster-howto.html  (simple how to)
-          /docs/config/cluster.html (reference documentation) -->
-      <!--
-      <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
-      -->
-
-      <!-- The request dumper valve dumps useful debugging information about
-           the request and response data received and sent by Tomcat.
-           Documentation at: /docs/config/valve.html -->
-      <!--
-      <Valve className="org.apache.catalina.valves.RequestDumperValve"/>
-      -->
-
-      <!-- This Realm uses the UserDatabase configured in the global JNDI
-           resources under the key "UserDatabase".  Any edits
-           that are performed against this UserDatabase are immediately
-           available for use by the Realm.  -->
-      <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
-             resourceName="UserDatabase"/>
-
-      <!-- Define the default virtual host
-           Note: XML Schema validation will not work with Xerces 2.2.
-       -->
-      <Host name="localhost" appBase="webapps"
-            unpackWARs="true" autoDeploy="true"
-            xmlValidation="false" xmlNamespaceAware="false">
-
-        <!-- SingleSignOn valve, share authentication between web applications
-             Documentation at: /docs/config/valve.html -->
-        <!--
-        <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
-        -->
-
-        <!-- Access log processes all example.
-             Documentation at: /docs/config/valve.html -->
-        <!--
-        <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
-               prefix="localhost_access_log." suffix=".txt" pattern="common" resolveHosts="false"/>
-        -->
-
-      </Host>
-    </Engine>
-  </Service>
-</Server>

+ 49 - 0
hadoop-common-project/hadoop-kms/src/site/configuration.xsl

@@ -0,0 +1,49 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+
+-->
+
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+  <xsl:output method="html"/>
+  <xsl:template match="configuration">
+    <html>
+      <body>
+        <h2>Configuration Properties</h2>
+        <table border="1">
+          <tr>
+            <th>name</th>
+            <th>value</th>
+            <th>description</th>
+          </tr>
+          <xsl:for-each select="property">
+            <tr>
+              <td>
+                <a name="{name}">
+                  <xsl:value-of select="name"/>
+                </a>
+              </td>
+              <td>
+                <xsl:value-of select="value"/>
+              </td>
+              <td>
+                <xsl:value-of select="description"/>
+              </td>
+            </tr>
+          </xsl:for-each>
+        </table>
+      </body>
+    </html>
+  </xsl:template>
+</xsl:stylesheet>

+ 84 - 38
hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm

@@ -29,7 +29,7 @@ The client is a KeyProvider implementation interacts with the KMS using the KMS
 
 KMS and its client have built-in security and they support HTTP SPNEGO Kerberos authentication and HTTPS secure transport.
 
-KMS is a Java web-application and it runs using a pre-configured Tomcat bundled with the Hadoop distribution.
+KMS is a Java Jetty web-application.
 
 KMS Client Configuration
 ------------------------
@@ -51,6 +51,15 @@ The following is an example to configure HDFS NameNode as a KMS client in
 KMS
 ---
 
+$H3 Start/Stop the KMS
+
+To start/stop KMS, use `hadoop --daemon start|stop kms`. For example:
+
+    hadoop-${project.version} $ hadoop --daemon start kms
+
+NOTE: The script `kms.sh` is deprecated. It is now just a wrapper around
+`hadoop kms`.
+
 $H3 KMS Configuration
 
 Configure the KMS backing KeyProvider properties in the `etc/hadoop/kms-site.xml` configuration file:
@@ -71,6 +80,15 @@ The password file is looked up in the Hadoop's configuration directory via the c
 
 NOTE: You need to restart the KMS for the configuration changes to take effect.
 
+$H3 KMS HTTP Configuration
+
+KMS pre-configures the HTTP port to 9600.
+
+KMS supports the following HTTP [configuration properties](./kms-default.html)
+in `etc/hadoop/kms-site.xml`.
+
+NOTE: You need to restart the KMS for the configuration changes to take effect.
+
 $H3 KMS Cache
 
 KMS has two kinds of caching: a CachingKeyProvider for caching the encryption keys, and a KeyProvider for caching the EEKs.
@@ -180,36 +198,6 @@ The Aggregation interval is configured via the property :
         <value>10000</value>
       </property>
 
-$H3 Start/Stop the KMS
-
-To start/stop KMS use KMS's sbin/kms.sh script. For example:
-
-    hadoop-${project.version} $ sbin/kms.sh start
-
-NOTE: Invoking the script without any parameters list all possible parameters (start, stop, run, etc.). The `kms.sh` script is a wrapper for Tomcat's `catalina.sh` script that sets the environment variables and Java System properties required to run KMS.
-
-$H3 Embedded Tomcat Configuration
-
-To configure the embedded Tomcat go to the `share/hadoop/kms/tomcat/conf`.
-
-KMS pre-configures the HTTP and Admin ports in Tomcat's `server.xml` to 9600 and 9601.
-
-Tomcat logs are also preconfigured to go to Hadoop's `logs/` directory.
-
-The following environment variables (which can be set in KMS's `etc/hadoop/kms-env.sh` script) can be used to alter those values:
-
-* KMS_HTTP_PORT
-* KMS_ADMIN_PORT
-* KMS_MAX_THREADS
-* KMS_MAX_HTTP_HEADER_SIZE
-* KMS_LOG
-
-NOTE: You need to restart the KMS for the configuration changes to take effect.
-
-$H3 Loading native libraries
-
-The following environment variable (which can be set in KMS's `etc/hadoop/kms-env.sh` script) can be used to specify the location of any required native libraries. For eg. Tomact native Apache Portable Runtime (APR) libraries:
-
-* JAVA_LIBRARY_PATH
-
 $H3 KMS Security Configuration
 
 $H4 Enabling Kerberos HTTP SPNEGO Authentication
@@ -279,20 +267,52 @@ If `users`, `groups` or `hosts` has a `*`, it means there are no restrictions fo
 
 $H4 KMS over HTTPS (SSL)
 
-To configure KMS to work over HTTPS the following 2 properties must be set in the `etc/hadoop/kms_env.sh` script (shown with default values):
+Enable SSL in `etc/hadoop/kms-site.xml`:
 
-* KMS_SSL_KEYSTORE_FILE=$HOME/.keystore
-* KMS_SSL_KEYSTORE_PASS=password
+```xml
+  <property>
+    <name>hadoop.kms.ssl.enabled</name>
+    <value>true</value>
+    <description>
+      Whether SSL is enabled. Default is false, i.e. disabled.
+    </description>
+  </property>
+
+```
 
-In the KMS `tomcat/conf` directory, replace the `server.xml` file with the provided `ssl-server.xml` file.
+Configure `etc/hadoop/ssl-server.xml` with proper values, for example:
+
+```xml
+<property>
+  <name>ssl.server.keystore.location</name>
+  <value>${user.home}/.keystore</value>
+  <description>Keystore to be used. Must be specified.</description>
+</property>
+
+<property>
+  <name>ssl.server.keystore.password</name>
+  <value></value>
+  <description>Must be specified.</description>
+</property>
+
+<property>
+  <name>ssl.server.keystore.keypassword</name>
+  <value></value>
+  <description>Must be specified.</description>
+</property>
+```
 
 You need to create an SSL certificate for the KMS. As the `kms` Unix user, use the Java `keytool` command to create the SSL certificate:
 
-    $ keytool -genkey -alias tomcat -keyalg RSA
+    $ keytool -genkey -alias jetty -keyalg RSA
 
-You will be asked a series of questions in an interactive prompt. It will create the keystore file, which will be named **.keystore** and located in the `kms` user home directory.
+You will be asked a series of questions in an interactive prompt. It will
+create the keystore file, which will be named **.keystore** and located in the
+user's home directory.
 
-The password you enter for "keystore password" must match the value of the `KMS_SSL_KEYSTORE_PASS` environment variable set in the `kms-env.sh` script in the configuration directory.
+The password you enter for "keystore password" must match the value of the
+property `ssl.server.keystore.password` set in the `ssl-server.xml` in the
+configuration directory.
 
 The answer to "What is your first and last name?" (i.e. "CN") must be the hostname of the machine where the KMS will be running.

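To confirm that the certificate's CN really is the KMS host name, you can run `keytool -list -v`, or inspect the keystore programmatically. The following is a minimal sketch (not part of this change); the `jetty` alias, keystore path, and password mirror the example above:

```java
import java.io.FileInputStream;
import java.security.KeyStore;
import java.security.cert.X509Certificate;

public class KeystoreCnCheck {
  public static void main(String[] args) throws Exception {
    KeyStore ks = KeyStore.getInstance("jks");
    try (FileInputStream in = new FileInputStream(
        System.getProperty("user.home") + "/.keystore")) {
      ks.load(in, "password".toCharArray());  // the keystore password
    }
    X509Certificate cert = (X509Certificate) ks.getCertificate("jetty");
    // The CN printed here must match the host the KMS will run on.
    System.out.println(cert.getSubjectX500Principal().getName());
  }
}
```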
@@ -1032,3 +1052,29 @@ $H4 Get Keys Metadata
       },
       ...
     ]
+
+$H3 Deprecated Environment Variables
+
+The following environment variables are deprecated. Set the corresponding
+configuration properties instead.
+
+Environment Variable     | Configuration Property       | Configuration File
+-------------------------|------------------------------|--------------------
+KMS_HTTP_PORT            | hadoop.kms.http.port         | kms-site.xml
+KMS_MAX_HTTP_HEADER_SIZE | hadoop.http.max.request.header.size and hadoop.http.max.response.header.size | kms-site.xml
+KMS_MAX_THREADS          | hadoop.http.max.threads      | kms-site.xml
+KMS_SSL_ENABLED          | hadoop.kms.ssl.enabled       | kms-site.xml
+KMS_SSL_KEYSTORE_FILE    | ssl.server.keystore.location | ssl-server.xml
+KMS_SSL_KEYSTORE_PASS    | ssl.server.keystore.password | ssl-server.xml
+KMS_TEMP                 | hadoop.http.temp.dir         | kms-site.xml
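
These properties are plain Hadoop `Configuration` keys. As an illustration only (the resource names and default values below are assumptions based on `kms-default.xml` in this patch), they can also be read and overridden programmatically, which is essentially what the reworked `MiniKMS` does:

```java
import org.apache.hadoop.conf.Configuration;

public class KmsConfExample {
  public static void main(String[] args) {
    // Approximates how the KMS assembles its configuration.
    Configuration conf = new Configuration(false);
    conf.addResource("kms-default.xml");
    conf.addResource("kms-site.xml");
    conf.setInt("hadoop.kms.http.port", 9600);         // was KMS_HTTP_PORT
    conf.setInt("hadoop.http.max.threads", 1000);      // was KMS_MAX_THREADS
    conf.setBoolean("hadoop.kms.ssl.enabled", false);  // was KMS_SSL_ENABLED
    System.out.println(conf.get("hadoop.kms.http.port"));
  }
}
```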
+
+$H3 Default HTTP Services
+
+Name               | Description
+-------------------|------------------------------------
+/conf              | Display configuration properties
+/jmx               | Java JMX management interface
+/logLevel          | Get or set log level per class
+/logs              | Display log files
+/stacks            | Display JVM stacks
+/static/index.html | The static home page
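
For example, the JMX service can be scraped over plain HTTP. The probe below is a sketch, not part of this change; it assumes the services are exposed at the web server root on the default `hadoop.kms.http.port` of 9600, and that the response is the usual Hadoop JSON MBean dump:

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class KmsJmxProbe {
  public static void main(String[] args) throws Exception {
    URL jmx = new URL("http://localhost:9600/jmx");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(jmx.openStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);  // JSON description of JVM/server MBeans
      }
    }
  }
}
```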

+ 21 - 97
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java

@@ -17,83 +17,23 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;

-import com.google.common.base.Preconditions;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.http.JettyUtils;
-import org.apache.hadoop.util.ThreadUtil;
-import org.eclipse.jetty.http.HttpVersion;
-import org.eclipse.jetty.server.ConnectionFactory;
-import org.eclipse.jetty.server.HttpConfiguration;
-import org.eclipse.jetty.server.HttpConnectionFactory;
-import org.eclipse.jetty.server.SecureRequestCustomizer;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.server.ServerConnector;
-import org.eclipse.jetty.server.SslConnectionFactory;
-import org.eclipse.jetty.util.ssl.SslContextFactory;
-import org.eclipse.jetty.webapp.WebAppContext;
-
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FileWriter;
+import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.Writer;
-import java.io.IOException;
-import java.net.MalformedURLException;
 import java.net.URL;
-import java.util.UUID;
-
-public class MiniKMS {
 
-  private static Server createJettyServer(String keyStore, String password, int inPort) {
-    try {
-      boolean ssl = keyStore != null;
-      String host = "localhost";
-      Server server = new Server();
-      ServerConnector conn = new ServerConnector(server);
-      HttpConfiguration httpConfig = new HttpConfiguration();
-      httpConfig.setRequestHeaderSize(JettyUtils.HEADER_SIZE);
-      httpConfig.setResponseHeaderSize(JettyUtils.HEADER_SIZE);
-      httpConfig.setSecureScheme("https");
-      httpConfig.addCustomizer(new SecureRequestCustomizer());
-      ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
-      conn.addConnectionFactory(connFactory);
-      conn.setHost(host);
-      conn.setPort(inPort);
-      if (ssl) {
-        SslContextFactory sslContextFactory = new SslContextFactory();
-        sslContextFactory.setNeedClientAuth(false);
-        sslContextFactory.setKeyStorePath(keyStore);
-        sslContextFactory.setKeyStoreType("jks");
-        sslContextFactory.setKeyStorePassword(password);
-        conn.addFirstConnectionFactory(
-            new SslConnectionFactory(sslContextFactory,
-            HttpVersion.HTTP_1_1.asString()));
-      }
-      server.addConnector(conn);
-      return server;
-    } catch (Exception ex) {
-      throw new RuntimeException("Could not start embedded servlet container, "
-          + ex.getMessage(), ex);
-    }
-  }
+import com.google.common.base.Preconditions;
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.util.ThreadUtil;
 
-  private static URL getJettyURL(Server server) {
-    boolean ssl = server.getConnectors()[0]
-        .getConnectionFactory(SslConnectionFactory.class) != null;
-    try {
-      String scheme = (ssl) ? "https" : "http";
-      return new URL(scheme + "://" +
-          ((ServerConnector)server.getConnectors()[0]).getHost() + ":"
-          + ((ServerConnector)server.getConnectors()[0]).getLocalPort());
-    } catch (MalformedURLException ex) {
-      throw new RuntimeException("It should never happen, " + ex.getMessage(),
-          ex);
-    }
-  }
+public class MiniKMS {

   public static class Builder {
     private File kmsConfDir;
@@ -150,7 +90,7 @@ public class MiniKMS {
   private String log4jConfFile;
   private String keyStore;
   private String keyStorePassword;
-  private Server jetty;
+  private KMSWebServer jetty;
   private int inPort;
   private URL kmsURL;

@@ -178,7 +118,6 @@ public class MiniKMS {
   }

   public void start() throws Exception {
-    ClassLoader cl = Thread.currentThread().getContextClassLoader();
     System.setProperty(KMSConfiguration.KMS_CONFIG_DIR, kmsConfDir);
     File aclsFile = new File(kmsConfDir, "kms-acls.xml");
     if (!aclsFile.exists()) {
@@ -202,35 +141,20 @@ public class MiniKMS {
       writer.close();
     }
     System.setProperty("log4j.configuration", log4jConfFile);
-    jetty = createJettyServer(keyStore, keyStorePassword, inPort);
-
-    // we need to do a special handling for MiniKMS to work when in a dir and
-    // when in a JAR in the classpath thanks to Jetty way of handling of webapps
-    // when they are in the a DIR, WAR or JAR.
-    URL webXmlUrl = cl.getResource("kms-webapp/WEB-INF/web.xml");
-    if (webXmlUrl == null) {
-      throw new RuntimeException(
-          "Could not find kms-webapp/ dir in test classpath");
-    }
-    boolean webXmlInJar = webXmlUrl.getPath().contains(".jar!/");
-    String webappPath;
-    if (webXmlInJar) {
-      File webInf = new File("target/" + UUID.randomUUID().toString() +
-          "/kms-webapp/WEB-INF");
-      webInf.mkdirs();
-      new File(webInf, "web.xml").delete();
-      copyResource("kms-webapp/WEB-INF/web.xml", new File(webInf, "web.xml"));
-      webappPath = webInf.getParentFile().getAbsolutePath();
-    } else {
-      webappPath = cl.getResource("kms-webapp").getPath();
-    }
-    WebAppContext context = new WebAppContext(webappPath, "/kms");
-    if (webXmlInJar) {
-      context.setClassLoader(cl);
+
+    final Configuration conf = KMSConfiguration.getKMSConf();
+    conf.set(KMSConfiguration.HTTP_HOST_KEY, "localhost");
+    conf.setInt(KMSConfiguration.HTTP_PORT_KEY, inPort);
+    if (keyStore != null) {
+      conf.setBoolean(KMSConfiguration.SSL_ENABLED_KEY, true);
+      conf.set(SSLFactory.SSL_SERVER_KEYSTORE_LOCATION, keyStore);
+      conf.set(SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD, keyStorePassword);
+      conf.set(SSLFactory.SSL_SERVER_KEYSTORE_TYPE, "jks");
     }
-    jetty.setHandler(context);
+
+    jetty = new KMSWebServer(conf);
     jetty.start();
-    kmsURL = new URL(getJettyURL(jetty), "kms");
+    kmsURL = jetty.getKMSUrl();
   }

   public URL getKMSUrl() {
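
With the hand-rolled Jetty setup gone, `MiniKMS` simply feeds a `Configuration` to `KMSWebServer`. A typical test would use it roughly as follows; this is a sketch, and `setKmsConfDir(File)`, `build()` and `stop()` are assumed from the existing `Builder` API rather than shown in this hunk:

```java
import java.io.File;
import java.net.URL;

import org.apache.hadoop.crypto.key.kms.server.MiniKMS;

public class MiniKmsExample {
  public static void main(String[] args) throws Exception {
    File confDir = new File("target/kms-conf").getAbsoluteFile();
    confDir.mkdirs();
    MiniKMS kms = new MiniKMS.Builder()
        .setKmsConfDir(confDir)  // start() writes default kms-*.xml here
        .build();
    kms.start();                 // boots KMSWebServer on the configured port
    URL url = kms.getKMSUrl();   // e.g. http://localhost:<port>/kms
    System.out.println("MiniKMS running at " + url);
    kms.stop();
  }
}
```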