
merge trunk into HADOOP-10388 branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HADOOP-10388@1619012 13f79535-47bb-0310-9956-ffa450edef68
Colin McCabe, 11 years ago
parent
commit
20bff8965c
100 changed files with 9183 additions and 1603 deletions
  1. 5 0
      .gitignore
  2. 1 0
      BUILDING.txt
  3. 7 0
      hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
  4. 52 0
      hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml
  5. 0 28
      hadoop-client/pom.xml
  6. 38 0
      hadoop-common-project/hadoop-auth/dev-support/findbugsExcludeFile.xml
  7. 50 0
      hadoop-common-project/hadoop-auth/pom.xml
  8. 10 26
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
  9. 111 16
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
  10. 76 37
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
  11. 20 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
  12. 14 3
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
  13. 55 2
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
  14. 49 0
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
  15. 139 0
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
  16. 32 14
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
  17. 62 0
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerSecretProvider.java
  18. 49 0
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
  19. 0 5
      hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm
  20. 0 4
      hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
  21. 0 4
      hadoop-common-project/hadoop-auth/src/site/apt/Examples.apt.vm
  22. 8 30
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java
  23. 143 22
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
  24. 69 2
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
  25. 16 0
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java
  26. 108 2
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java
  27. 63 0
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
  28. 79 0
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRolloverSignerSecretProvider.java
  29. 69 16
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java
  30. 33 0
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestStringSignerSecretProvider.java
  31. 599 16
      hadoop-common-project/hadoop-common/CHANGES.txt
  32. 10 0
      hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  33. 17 20
      hadoop-common-project/hadoop-common/pom.xml
  34. 6 0
      hadoop-common-project/hadoop-common/src/JNIFlags.cmake
  35. 132 92
      hadoop-common-project/hadoop-common/src/main/bin/hadoop
  36. 147 251
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
  37. 28 174
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
  38. 26 11
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
  39. 1036 0
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
  40. 93 0
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example
  41. 10 3
      hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd
  42. 16 35
      hadoop-common-project/hadoop-common/src/main/bin/rcc
  43. 23 28
      hadoop-common-project/hadoop-common/src/main/bin/slaves.sh
  44. 26 12
      hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
  45. 25 11
      hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh
  46. 372 51
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
  47. 16 14
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
  48. 0 2
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
  49. 0 2
      hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
  50. 666 0
      hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
  51. 166 12
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  52. 174 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/CachingKeyProvider.java
  53. 237 26
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
  54. 128 80
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
  55. 383 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
  56. 115 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
  57. 128 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderExtension.java
  58. 10 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderFactory.java
  59. 161 108
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
  60. 6 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/UserProvider.java
  61. 788 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
  62. 62 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java
  63. 317 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
  64. 134 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
  65. 9 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
  66. 13 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
  67. 6 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
  68. 27 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  69. 40 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  70. 37 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
  71. 4 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
  72. 43 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
  73. 3 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
  74. 208 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
  75. 15 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
  76. 227 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  77. 52 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
  78. 47 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
  79. 46 14
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
  80. 24 24
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
  81. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
  82. 34 52
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java
  83. 3 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathIOException.java
  84. 16 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
  85. 7 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java
  86. 121 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java
  87. 71 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrSetFlag.java
  88. 80 37
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
  89. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java
  90. 15 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
  91. 134 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java
  92. 21 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
  93. 5 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java
  94. 2 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
  95. 146 82
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
  96. 81 27
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
  97. 37 100
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
  98. 111 15
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
  99. 50 24
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
  100. 31 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java

+ 5 - 0
.gitignore

@@ -1,11 +1,16 @@
 *.iml
 *.ipr
 *.iws
+*.orig
+*.rej
 .idea
 .svn
 .classpath
 .project
 .settings
 target
+hadoop-common-project/hadoop-kms/downloads/
 hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
+hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
+hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml

+ 1 - 0
BUILDING.txt

@@ -189,6 +189,7 @@ Requirements:
 * Maven 3.0 or later
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
+* CMake 2.6 or newer
 * Windows SDK or Visual Studio 2010 Professional
 * Unix command-line tools from GnuWin32 or Cygwin: sh, mkdir, rm, cp, tar, gzip
 * zlib headers (if building native code bindings for zlib)

+ 7 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml

@@ -29,6 +29,7 @@
         <exclude>*-config.cmd</exclude>
         <exclude>start-*.cmd</exclude>
         <exclude>stop-*.cmd</exclude>
+        <exclude>hadoop-layout.sh.example</exclude>
       </excludes>
       <fileMode>0755</fileMode>
     </fileSet>
@@ -42,6 +43,8 @@
       <includes>
         <include>*-config.sh</include>
         <include>*-config.cmd</include>
+        <include>*-functions.sh</include>
+        <include>hadoop-layout.sh.example</include>
       </includes>
       <fileMode>0755</fileMode>
     </fileSet>
@@ -57,6 +60,10 @@
         <exclude>hadoop.cmd</exclude>
         <exclude>hdfs.cmd</exclude>
         <exclude>hadoop-config.cmd</exclude>
+        <exclude>hadoop-functions.sh</exclude>
+        <exclude>hadoop-layout.sh.example</exclude>
+        <exclude>hdfs-config.cmd</exclude>
+        <exclude>hdfs-config.sh</exclude>
       </excludes>
       <fileMode>0755</fileMode>
     </fileSet>

+ 52 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml

@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<assembly>
+  <id>hadoop-kms-dist</id>
+  <formats>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <fileSets>
+    <!-- Configuration files -->
+    <fileSet>
+      <directory>${basedir}/src/main/conf</directory>
+      <outputDirectory>/etc/hadoop</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/sbin</directory>
+      <outputDirectory>/sbin</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/libexec</directory>
+      <outputDirectory>/libexec</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <!-- Documentation -->
+    <fileSet>
+      <directory>${project.build.directory}/site</directory>
+      <outputDirectory>/share/doc/hadoop/kms</outputDirectory>
+    </fileSet>
+  </fileSets>
+</assembly>

+ 0 - 28
hadoop-client/pom.xml

@@ -39,22 +39,10 @@
       <artifactId>hadoop-common</artifactId>
       <scope>compile</scope>
       <exclusions>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-compiler</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-runtime</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>javax.servlet</groupId>
           <artifactId>servlet-api</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>javax.servlet.jsp</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>commons-logging</groupId>
           <artifactId>commons-logging-api</artifactId>
@@ -71,10 +59,6 @@
           <groupId>org.mortbay.jetty</groupId>
           <artifactId>jetty-util</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jsp-api-2.1</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>org.mortbay.jetty</groupId>
           <artifactId>servlet-api-2.5</artifactId>
@@ -111,10 +95,6 @@
           <groupId>com.jcraft</groupId>
           <artifactId>jsch</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>commons-el</groupId>
-          <artifactId>commons-el</artifactId>
-        </exclusion>
       </exclusions>
     </dependency>
 
@@ -147,14 +127,6 @@
           <groupId>javax.servlet</groupId>
           <artifactId>servlet-api</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>javax.servlet.jsp</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-runtime</artifactId>
-        </exclusion>
       </exclusions>
     </dependency>
 

+ 38 - 0
hadoop-common-project/hadoop-auth/dev-support/findbugsExcludeFile.xml

@@ -0,0 +1,38 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<FindBugsFilter>
+  <!--
+    Caller is not supposed to modify returned values even though there's nothing
+    stopping them; we do this for performance reasons.
+  -->
+  <Match>
+    <Class name="org.apache.hadoop.security.authentication.util.RolloverSignerSecretProvider" />
+    <Method name="getAllSecrets" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.security.authentication.util.StringSignerSecretProvider" />
+    <Method name="getAllSecrets" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.security.authentication.util.StringSignerSecretProvider" />
+    <Method name="getCurrentSecret" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+
+</FindBugsFilter>

+ 50 - 0
hadoop-common-project/hadoop-auth/pom.xml

@@ -97,6 +97,29 @@
       <artifactId>httpclient</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.directory.server</groupId>
+      <artifactId>apacheds-kerberos-codec</artifactId>
+      <scope>compile</scope>
+        <exclusions>
+          <exclusion>
+            <groupId>org.apache.directory.api</groupId>
+            <artifactId>api-asn1-ber</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.directory.api</groupId>
+            <artifactId>api-i18n</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.directory.api</groupId>
+            <artifactId>api-ldap-model</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>net.sf.ehcache</groupId>
+            <artifactId>ehcache-core</artifactId>
+          </exclusion>
+        </exclusions>
+    </dependency>
   </dependencies>
 
   <build>
@@ -116,6 +139,33 @@
           <attach>true</attach>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>prepare-jar</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>jar</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>prepare-test-jar</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 

+ 10 - 26
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java

@@ -120,32 +120,6 @@ public class AuthenticatedURL {
       return token;
     }
 
-    /**
-     * Return the hashcode for the token.
-     *
-     * @return the hashcode for the token.
-     */
-    @Override
-    public int hashCode() {
-      return (token != null) ? token.hashCode() : 0;
-    }
-
-    /**
-     * Return if two token instances are equal.
-     *
-     * @param o the other token instance.
-     *
-     * @return if this instance and the other instance are equal.
-     */
-    @Override
-    public boolean equals(Object o) {
-      boolean eq = false;
-      if (o instanceof Token) {
-        Token other = (Token) o;
-        eq = (token == null && other.token == null) || (token != null && this.token.equals(other.token));
-      }
-      return eq;
-    }
   }
 
   private static Class<? extends Authenticator> DEFAULT_AUTHENTICATOR = KerberosAuthenticator.class;
@@ -208,6 +182,16 @@ public class AuthenticatedURL {
     this.authenticator.setConnectionConfigurator(connConfigurator);
   }
 
+  /**
+   * Returns the {@link Authenticator} instance used by the
+   * <code>AuthenticatedURL</code>.
+   *
+   * @return the {@link Authenticator} instance
+   */
+  protected Authenticator getAuthenticator() {
+    return authenticator;
+  }
+
   /**
    * Returns an authenticated {@link HttpURLConnection}.
    *
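
The hunk above removes the hand-rolled hashCode/equals pair from Token and adds a protected getAuthenticator() accessor. A minimal sketch of how a subclass might use the new accessor; the LoggingAuthenticatedURL class name is illustrative, not part of this patch:

    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
    import org.apache.hadoop.security.authentication.client.Authenticator;

    // Hypothetical subclass; only getAuthenticator() comes from this change.
    public class LoggingAuthenticatedURL extends AuthenticatedURL {
      public LoggingAuthenticatedURL(Authenticator authenticator) {
        super(authenticator);
      }

      public String describeAuthenticator() {
        // getAuthenticator() is the protected accessor introduced above.
        return getAuthenticator().getClass().getName();
      }
    }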

+ 111 - 16
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java

@@ -19,6 +19,9 @@ import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.util.Signer;
 import org.apache.hadoop.security.authentication.util.SignerException;
+import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider;
+import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
+import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -107,11 +110,29 @@ public class AuthenticationFilter implements Filter {
    */
   public static final String COOKIE_PATH = "cookie.path";
 
-  private static final Random RAN = new Random();
+  /**
+   * Constant for the configuration property that indicates the name of the
+   * SignerSecretProvider class to use.  If not specified, the SIGNATURE_SECRET
+   * property will be used, falling back to a random secret.
+   */
+  public static final String SIGNER_SECRET_PROVIDER_CLASS =
+          "signer.secret.provider";
+
+  /**
+   * Constant for the attribute that can be used for providing a custom
+   * object that subclasses the SignerSecretProvider.  Note that this should be
+   * set in the ServletContext and the class should already be initialized.  
+   * If not specified, SIGNER_SECRET_PROVIDER_CLASS will be used.
+   */
+  public static final String SIGNATURE_PROVIDER_ATTRIBUTE =
+      "org.apache.hadoop.security.authentication.util.SignerSecretProvider";
 
+  private Properties config;
   private Signer signer;
+  private SignerSecretProvider secretProvider;
   private AuthenticationHandler authHandler;
   private boolean randomSecret;
+  private boolean customSecretProvider;
   private long validity;
   private String cookieDomain;
   private String cookiePath;
@@ -130,15 +151,19 @@ public class AuthenticationFilter implements Filter {
   public void init(FilterConfig filterConfig) throws ServletException {
     String configPrefix = filterConfig.getInitParameter(CONFIG_PREFIX);
     configPrefix = (configPrefix != null) ? configPrefix + "." : "";
-    Properties config = getConfiguration(configPrefix, filterConfig);
+    config = getConfiguration(configPrefix, filterConfig);
     String authHandlerName = config.getProperty(AUTH_TYPE, null);
     String authHandlerClassName;
     if (authHandlerName == null) {
-      throw new ServletException("Authentication type must be specified: simple|kerberos|<class>");
+      throw new ServletException("Authentication type must be specified: " +
+          PseudoAuthenticationHandler.TYPE + "|" + 
+          KerberosAuthenticationHandler.TYPE + "|<class>");
     }
-    if (authHandlerName.equals("simple")) {
+    if (authHandlerName.toLowerCase(Locale.ENGLISH).equals(
+        PseudoAuthenticationHandler.TYPE)) {
       authHandlerClassName = PseudoAuthenticationHandler.class.getName();
-    } else if (authHandlerName.equals("kerberos")) {
+    } else if (authHandlerName.toLowerCase(Locale.ENGLISH).equals(
+        KerberosAuthenticationHandler.TYPE)) {
       authHandlerClassName = KerberosAuthenticationHandler.class.getName();
     } else {
       authHandlerClassName = authHandlerName;
@@ -155,19 +180,62 @@ public class AuthenticationFilter implements Filter {
     } catch (IllegalAccessException ex) {
       throw new ServletException(ex);
     }
-    String signatureSecret = config.getProperty(configPrefix + SIGNATURE_SECRET);
-    if (signatureSecret == null) {
-      signatureSecret = Long.toString(RAN.nextLong());
-      randomSecret = true;
-      LOG.warn("'signature.secret' configuration not set, using a random value as secret");
+
+    validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY, "36000"))
+        * 1000; //10 hours
+    secretProvider = (SignerSecretProvider) filterConfig.getServletContext().
+        getAttribute(SIGNATURE_PROVIDER_ATTRIBUTE);
+    if (secretProvider == null) {
+      String signerSecretProviderClassName =
+          config.getProperty(configPrefix + SIGNER_SECRET_PROVIDER_CLASS, null);
+      if (signerSecretProviderClassName == null) {
+        String signatureSecret =
+            config.getProperty(configPrefix + SIGNATURE_SECRET, null);
+        if (signatureSecret != null) {
+          secretProvider = new StringSignerSecretProvider(signatureSecret);
+        } else {
+          secretProvider = new RandomSignerSecretProvider();
+          randomSecret = true;
+        }
+      } else {
+        try {
+          Class<?> klass = Thread.currentThread().getContextClassLoader().
+              loadClass(signerSecretProviderClassName);
+          secretProvider = (SignerSecretProvider) klass.newInstance();
+          customSecretProvider = true;
+        } catch (ClassNotFoundException ex) {
+          throw new ServletException(ex);
+        } catch (InstantiationException ex) {
+          throw new ServletException(ex);
+        } catch (IllegalAccessException ex) {
+          throw new ServletException(ex);
+        }
+      }
+      try {
+        secretProvider.init(config, validity);
+      } catch (Exception ex) {
+        throw new ServletException(ex);
+      }
+    } else {
+      customSecretProvider = true;
     }
-    signer = new Signer(signatureSecret.getBytes());
-    validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY, "36000")) * 1000; //10 hours
+    signer = new Signer(secretProvider);
 
     cookieDomain = config.getProperty(COOKIE_DOMAIN, null);
     cookiePath = config.getProperty(COOKIE_PATH, null);
   }
 
+  /**
+   * Returns the configuration properties of the {@link AuthenticationFilter}
+   * without the prefix. The returned properties are the same ones returned by
+   * the {@link #getConfiguration(String, FilterConfig)} method.
+   *
+   * @return the configuration properties.
+   */
+  protected Properties getConfiguration() {
+    return config;
+  }
+
   /**
    * Returns the authentication handler being used.
    *
@@ -186,6 +254,15 @@ public class AuthenticationFilter implements Filter {
     return randomSecret;
   }
 
+  /**
+   * Returns if a custom implementation of a SignerSecretProvider is being used.
+   *
+   * @return if a custom implementation of a SignerSecretProvider is being used.
+   */
+  protected boolean isCustomSignerSecretProvider() {
+    return customSecretProvider;
+  }
+
   /**
    * Returns the validity time of the generated tokens.
    *
@@ -224,6 +301,9 @@ public class AuthenticationFilter implements Filter {
       authHandler.destroy();
       authHandler = null;
     }
+    if (secretProvider != null) {
+      secretProvider.destroy();
+    }
   }
 
   /**
@@ -389,7 +469,7 @@ public class AuthenticationFilter implements Filter {
             createAuthCookie(httpResponse, signedToken, getCookieDomain(),
                     getCookiePath(), token.getExpires(), isHttps);
           }
-          filterChain.doFilter(httpRequest, httpResponse);
+          doFilter(filterChain, httpRequest, httpResponse);
         }
       } else {
         unauthorizedResponse = false;
@@ -413,6 +493,15 @@ public class AuthenticationFilter implements Filter {
     }
   }
 
+  /**
+   * Delegates the call to the servlet filter chain. Sub-classes may override this
+   * method to perform pre and post tasks.
+   */
+  protected void doFilter(FilterChain filterChain, HttpServletRequest request,
+      HttpServletResponse response) throws IOException, ServletException {
+    filterChain.doFilter(request, response);
+  }
+
   /**
    * Creates the Hadoop authentication HTTP cookie.
    *
@@ -421,14 +510,20 @@ public class AuthenticationFilter implements Filter {
    *                cookie. It has no effect if its value < 0.
    *
    * XXX the following code duplicate some logic in Jetty / Servlet API,
-   * because of the fact that Hadoop is stuck at servlet 3.0 and jetty 6
+   * because of the fact that Hadoop is stuck at servlet 2.5 and jetty 6
    * right now.
    */
   public static void createAuthCookie(HttpServletResponse resp, String token,
                                       String domain, String path, long expires,
                                       boolean isSecure) {
-    StringBuilder sb = new StringBuilder(AuthenticatedURL.AUTH_COOKIE).append
-            ("=").append(token);
+    StringBuilder sb = new StringBuilder(AuthenticatedURL.AUTH_COOKIE)
+                           .append("=");
+    if (token != null && token.length() > 0) {
+      sb.append("\"")
+          .append(token)
+          .append("\"");
+    }
+    sb.append("; Version=1");
 
     if (path != null) {
       sb.append("; Path=").append(path);
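
A sketch of the new extension point wired in above: an application can pre-initialize its own SignerSecretProvider and hand it to the filter through the servlet context attribute, instead of relying on signature.secret. The helper class below and the choice of RandomSignerSecretProvider are illustrative, not part of this change:

    import java.util.Properties;
    import javax.servlet.ServletContext;
    import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
    import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider;
    import org.apache.hadoop.security.authentication.util.SignerSecretProvider;

    // Hypothetical helper: installs an already-initialized provider so that
    // AuthenticationFilter.init() picks it up instead of creating its own.
    public class SecretProviderSetup {
      public static void install(ServletContext ctx, long tokenValidityMs)
          throws Exception {
        SignerSecretProvider provider = new RandomSignerSecretProvider();
        provider.init(new Properties(), tokenValidityMs);
        ctx.setAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE,
            provider);
      }
    }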

+ 76 - 37
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java

@@ -34,16 +34,18 @@ import javax.security.auth.login.LoginException;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+
 import java.io.File;
 import java.io.IOException;
-import java.security.Principal;
 import java.security.PrivilegedActionException;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
+import java.util.regex.Pattern;
 
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
@@ -140,10 +142,29 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
    */
   public static final String NAME_RULES = TYPE + ".name.rules";
 
-  private String principal;
+  private String type;
   private String keytab;
   private GSSManager gssManager;
-  private LoginContext loginContext;
+  private Subject serverSubject = new Subject();
+  private List<LoginContext> loginContexts = new ArrayList<LoginContext>();
+
+  /**
+   * Creates a Kerberos SPNEGO authentication handler with the default
+   * auth-token type, <code>kerberos</code>.
+   */
+  public KerberosAuthenticationHandler() {
+    this(TYPE);
+  }
+
+  /**
+   * Creates a Kerberos SPNEGO authentication handler with a custom auth-token
+   * type.
+   *
+   * @param type auth-token type.
+   */
+  public KerberosAuthenticationHandler(String type) {
+    this.type = type;
+  }
 
   /**
    * Initializes the authentication handler instance.
@@ -159,7 +180,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
   @Override
   public void init(Properties config) throws ServletException {
     try {
-      principal = config.getProperty(PRINCIPAL, principal);
+      String principal = config.getProperty(PRINCIPAL);
       if (principal == null || principal.trim().length() == 0) {
         throw new ServletException("Principal not defined in configuration");
       }
@@ -170,23 +191,40 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       if (!new File(keytab).exists()) {
         throw new ServletException("Keytab does not exist: " + keytab);
       }
+      
+      // use all SPNEGO principals in the keytab if a principal isn't
+      // specifically configured
+      final String[] spnegoPrincipals;
+      if (principal.equals("*")) {
+        spnegoPrincipals = KerberosUtil.getPrincipalNames(
+            keytab, Pattern.compile("HTTP/.*"));
+        if (spnegoPrincipals.length == 0) {
+          throw new ServletException("Principals do not exist in the keytab");
+        }
+      } else {
+        spnegoPrincipals = new String[]{principal};
+      }
 
       String nameRules = config.getProperty(NAME_RULES, null);
       if (nameRules != null) {
         KerberosName.setRules(nameRules);
       }
       
-      Set<Principal> principals = new HashSet<Principal>();
-      principals.add(new KerberosPrincipal(principal));
-      Subject subject = new Subject(false, principals, new HashSet<Object>(), new HashSet<Object>());
-
-      KerberosConfiguration kerberosConfiguration = new KerberosConfiguration(keytab, principal);
-
-      LOG.info("Login using keytab "+keytab+", for principal "+principal);
-      loginContext = new LoginContext("", subject, null, kerberosConfiguration);
-      loginContext.login();
-
-      Subject serverSubject = loginContext.getSubject();
+      for (String spnegoPrincipal : spnegoPrincipals) {
+        LOG.info("Login using keytab {}, for principal {}",
+            keytab, spnegoPrincipal);
+        final KerberosConfiguration kerberosConfiguration =
+            new KerberosConfiguration(keytab, spnegoPrincipal);
+        final LoginContext loginContext =
+            new LoginContext("", serverSubject, null, kerberosConfiguration);
+        try {
+          loginContext.login();
+        } catch (LoginException le) {
+          LOG.warn("Failed to login as [{}]", spnegoPrincipal, le);
+          throw new AuthenticationException(le);          
+        }
+        loginContexts.add(loginContext);
+      }
       try {
         gssManager = Subject.doAs(serverSubject, new PrivilegedExceptionAction<GSSManager>() {
 
@@ -198,7 +236,6 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       } catch (PrivilegedActionException ex) {
         throw ex.getException();
       }
-      LOG.info("Initialized, principal [{}] from keytab [{}]", principal, keytab);
     } catch (Exception ex) {
       throw new ServletException(ex);
     }
@@ -211,14 +248,16 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
    */
   @Override
   public void destroy() {
-    try {
-      if (loginContext != null) {
+    keytab = null;
+    serverSubject = null;
+    for (LoginContext loginContext : loginContexts) {
+      try {
         loginContext.logout();
-        loginContext = null;
+      } catch (LoginException ex) {
+        LOG.warn(ex.getMessage(), ex);
       }
-    } catch (LoginException ex) {
-      LOG.warn(ex.getMessage(), ex);
     }
+    loginContexts.clear();
   }
 
   /**
@@ -229,16 +268,16 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
    */
   @Override
   public String getType() {
-    return TYPE;
+    return type;
   }
 
   /**
-   * Returns the Kerberos principal used by the authentication handler.
+   * Returns the Kerberos principals used by the authentication handler.
    *
-   * @return the Kerberos principal used by the authentication handler.
+   * @return the Kerberos principals used by the authentication handler.
    */
-  protected String getPrincipal() {
-    return principal;
+  protected Set<KerberosPrincipal> getPrincipals() {
+    return serverSubject.getPrincipals(KerberosPrincipal.class);
   }
 
   /**
@@ -304,7 +343,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       authorization = authorization.substring(KerberosAuthenticator.NEGOTIATE.length()).trim();
       final Base64 base64 = new Base64(0);
       final byte[] clientToken = base64.decode(authorization);
-      Subject serverSubject = loginContext.getSubject();
+      final String serverName = request.getServerName();
       try {
         token = Subject.doAs(serverSubject, new PrivilegedExceptionAction<AuthenticationToken>() {
 
@@ -314,15 +353,15 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
             GSSContext gssContext = null;
             GSSCredential gssCreds = null;
             try {
-              if (IBM_JAVA) {
-                // IBM JDK needs non-null credentials to be passed to createContext here, with
-                // SPNEGO mechanism specified, otherwise JGSS will use its default mechanism
-                // only, which is Kerberos V5.
-                gssCreds = gssManager.createCredential(null, GSSCredential.INDEFINITE_LIFETIME,
-                    new Oid[]{KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
-                        KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID")},
-                    GSSCredential.ACCEPT_ONLY);
-              }
+              gssCreds = gssManager.createCredential(
+                  gssManager.createName(
+                      KerberosUtil.getServicePrincipal("HTTP", serverName),
+                      KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
+                  GSSCredential.INDEFINITE_LIFETIME,
+                  new Oid[]{
+                    KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
+                    KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID")},
+                  GSSCredential.ACCEPT_ONLY);
               gssContext = gssManager.createContext(gssCreds);
               byte[] serverToken = gssContext.acceptSecContext(clientToken, 0, clientToken.length);
               if (serverToken != null && serverToken.length > 0) {
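
The wildcard principal support added above ("*" logs in every HTTP/* principal found in the keytab) could be exercised roughly as follows; the keytab path is illustrative:

    import java.util.Properties;
    import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;

    public class SpnegoHandlerSetup {
      public static KerberosAuthenticationHandler create() throws Exception {
        Properties props = new Properties();
        // "*" makes init() log in all HTTP/* principals from the keytab.
        props.setProperty(KerberosAuthenticationHandler.PRINCIPAL, "*");
        props.setProperty(KerberosAuthenticationHandler.KEYTAB,
            "/etc/security/keytabs/spnego.service.keytab");
        KerberosAuthenticationHandler handler =
            new KerberosAuthenticationHandler();
        handler.init(props);
        return handler;
      }
    }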

+ 20 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java

@@ -55,6 +55,25 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
 
   private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
   private boolean acceptAnonymous;
+  private String type;
+
+  /**
+   * Creates a Hadoop pseudo authentication handler with the default auth-token
+   * type, <code>simple</code>.
+   */
+  public PseudoAuthenticationHandler() {
+    this(TYPE);
+  }
+
+  /**
+   * Creates a Hadoop pseudo authentication handler with a custom auth-token
+   * type.
+   *
+   * @param type auth-token type.
+   */
+  public PseudoAuthenticationHandler(String type) {
+    this.type = type;
+  }
 
   /**
    * Initializes the authentication handler instance.
@@ -96,7 +115,7 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
    */
   @Override
   public String getType() {
-    return TYPE;
+    return type;
   }
 
   /**
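
A minimal sketch of the new custom-type constructor; the subclass and the "simple-dt" type name are illustrative, not part of this patch:

    import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;

    // Hypothetical subclass: getType() now reports "simple-dt" instead of the
    // default "simple", so the handler can be registered under a different
    // auth-token type while reusing the pseudo/simple behavior.
    public class CustomTypePseudoHandler extends PseudoAuthenticationHandler {
      public CustomTypePseudoHandler() {
        super("simple-dt");
      }
    }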

+ 14 - 3
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.security.authentication.util;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Locale;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -66,7 +67,7 @@ public class KerberosName {
    */
   private static final Pattern ruleParser =
     Pattern.compile("\\s*((DEFAULT)|(RULE:\\[(\\d*):([^\\]]*)](\\(([^)]*)\\))?"+
-                    "(s/([^/]*)/([^/]*)/(g)?)?))");
+                    "(s/([^/]*)/([^/]*)/(g)?)?))/?(L)?");
 
   /**
    * A pattern that recognizes simple/non-simple names.
@@ -171,6 +172,7 @@ public class KerberosName {
     private final Pattern fromPattern;
     private final String toPattern;
     private final boolean repeat;
+    private final boolean toLowerCase;
 
     Rule() {
       isDefault = true;
@@ -180,10 +182,11 @@ public class KerberosName {
       fromPattern = null;
       toPattern = null;
       repeat = false;
+      toLowerCase = false;
     }
 
     Rule(int numOfComponents, String format, String match, String fromPattern,
-         String toPattern, boolean repeat) {
+         String toPattern, boolean repeat, boolean toLowerCase) {
       isDefault = false;
       this.numOfComponents = numOfComponents;
       this.format = format;
@@ -192,6 +195,7 @@ public class KerberosName {
         fromPattern == null ? null : Pattern.compile(fromPattern);
       this.toPattern = toPattern;
       this.repeat = repeat;
+      this.toLowerCase = toLowerCase;
     }
 
     @Override
@@ -220,6 +224,9 @@ public class KerberosName {
             buf.append('g');
           }
         }
+        if (toLowerCase) {
+          buf.append("/L");
+        }
       }
       return buf.toString();
     }
@@ -308,6 +315,9 @@ public class KerberosName {
         throw new NoMatchingRule("Non-simple name " + result +
                                  " after auth_to_local rule " + this);
       }
+      if (toLowerCase && result != null) {
+        result = result.toLowerCase(Locale.ENGLISH);
+      }
       return result;
     }
   }
@@ -328,7 +338,8 @@ public class KerberosName {
                             matcher.group(7),
                             matcher.group(9),
                             matcher.group(10),
-                            "g".equals(matcher.group(11))));
+                            "g".equals(matcher.group(11)),
+                            "L".equals(matcher.group(12))));
       }
       remaining = remaining.substring(matcher.end());
     }
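
The rule parser above now accepts an optional trailing /L flag that lower-cases the translated name. A small sketch of the effect, assuming an illustrative EXAMPLE.COM realm and principal:

    import org.apache.hadoop.security.authentication.util.KerberosName;

    public class LowerCaseRuleDemo {
      public static void main(String[] args) throws Exception {
        // Strip the realm, then lower-case the result via the new /L flag.
        KerberosName.setRules(
            "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*///L\nDEFAULT");
        KerberosName name = new KerberosName("MixedCaseUser@EXAMPLE.COM");
        System.out.println(name.getShortName());  // expected: mixedcaseuser
      }
    }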

+ 55 - 2
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java

@@ -17,18 +17,27 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
+import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
+
+import java.io.File;
+import java.io.IOException;
 import java.lang.reflect.Field;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
 import java.util.Locale;
+import java.util.Set;
+import java.util.regex.Pattern;
 
+import org.apache.directory.server.kerberos.shared.keytab.Keytab;
+import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry;
 import org.ietf.jgss.GSSException;
 import org.ietf.jgss.Oid;
 
-import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
-
 public class KerberosUtil {
 
   /* Return the Kerberos login module name */
@@ -103,4 +112,48 @@ public class KerberosUtil {
     // with uppercase characters.
     return service + "/" + fqdn.toLowerCase(Locale.US);
   }
+
+  /**
+   * Get all the unique principals present in the keytab file.
+   * 
+   * @param keytabFileName 
+   *          Name of the keytab file to be read.
+   * @return list of unique principals in the keytab.
+   * @throws IOException 
+   *          If keytab entries cannot be read from the file.
+   */
+  static final String[] getPrincipalNames(String keytabFileName) throws IOException {
+      Keytab keytab = Keytab.read(new File(keytabFileName));
+      Set<String> principals = new HashSet<String>();
+      List<KeytabEntry> entries = keytab.getEntries();
+      for (KeytabEntry entry: entries){
+        principals.add(entry.getPrincipalName().replace("\\", "/"));
+      }
+      return principals.toArray(new String[0]);
+    }
+
+  /**
+   * Get all the unique principals from the keytab file that match a pattern.
+   * 
+   * @param keytab 
+   *          Name of the keytab file to be read.
+   * @param pattern 
+   *         pattern to be matched.
+   * @return list of unique principals that match the pattern.
+   * @throws IOException 
+   */
+  public static final String[] getPrincipalNames(String keytab,
+      Pattern pattern) throws IOException {
+    String[] principals = getPrincipalNames(keytab);
+    if (principals.length != 0) {
+      List<String> matchingPrincipals = new ArrayList<String>();
+      for (String principal : principals) {
+        if (pattern.matcher(principal).matches()) {
+          matchingPrincipals.add(principal);
+        }
+      }
+      principals = matchingPrincipals.toArray(new String[0]);
+    }
+    return principals;
+  }
 }

+ 49 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java

@@ -0,0 +1,49 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.util.Random;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A SignerSecretProvider that uses a random number as its secret.  It rolls
+ * the secret at a regular interval.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Private
+public class RandomSignerSecretProvider extends RolloverSignerSecretProvider {
+
+  private final Random rand;
+
+  public RandomSignerSecretProvider() {
+    super();
+    rand = new Random();
+  }
+
+  /**
+   * This constructor lets you set the seed of the Random Number Generator and
+   * is meant for testing.
+   * @param seed the seed for the random number generator
+   */
+  public RandomSignerSecretProvider(long seed) {
+    super();
+    rand = new Random(seed);
+  }
+
+  @Override
+  protected byte[] generateNewSecret() {
+    return Long.toString(rand.nextLong()).getBytes();
+  }
+}

+ 139 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java

@@ -0,0 +1,139 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.util.Properties;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An abstract SignerSecretProvider that can be used as the base for a
+ * rolling secret.  The secret will roll over at the same interval as the token
+ * validity, so there are only ever a maximum of two valid secrets at any
+ * given time.  This class handles storing and returning the secrets, as well
+ * as the rolling over.  At a minimum, subclasses simply need to implement the
+ * generateNewSecret() method.  More advanced implementations can override
+ * other methods to provide more advanced behavior, but should be careful when
+ * doing so.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Private
+public abstract class RolloverSignerSecretProvider
+    extends SignerSecretProvider {
+
+  private static Logger LOG = LoggerFactory.getLogger(
+    RolloverSignerSecretProvider.class);
+  /**
+   * Stores the currently valid secrets.  The current secret is the 0th element
+   * in the array.
+   */
+  private volatile byte[][] secrets;
+  private ScheduledExecutorService scheduler;
+  private boolean schedulerRunning;
+  private boolean isDestroyed;
+
+  public RolloverSignerSecretProvider() {
+    schedulerRunning = false;
+    isDestroyed = false;
+  }
+
+  /**
+   * Initialize the SignerSecretProvider.  It initializes the current secret
+   * and starts the scheduler for the rollover to run at an interval of
+   * tokenValidity.
+   * @param config filter configuration
+   * @param tokenValidity The amount of time a token is valid for
+   * @throws Exception
+   */
+  @Override
+  public void init(Properties config, long tokenValidity) throws Exception {
+    initSecrets(generateNewSecret(), null);
+    startScheduler(tokenValidity, tokenValidity);
+  }
+
+  /**
+   * Initializes the secrets array.  This should typically be called only once,
+   * during init but some implementations may wish to call it other times.
+   * previousSecret can be null if there isn't a previous secret, but
+   * currentSecret should never be null.
+   * @param currentSecret The current secret
+   * @param previousSecret The previous secret
+   */
+  protected void initSecrets(byte[] currentSecret, byte[] previousSecret) {
+    secrets = new byte[][]{currentSecret, previousSecret};
+  }
+
+  /**
+   * Starts the scheduler for the rollover to run at an interval.
+   * @param initialDelay The initial delay in the rollover in milliseconds
+   * @param period The interval for the rollover in milliseconds
+   */
+  protected synchronized void startScheduler(long initialDelay, long period) {
+    if (!schedulerRunning) {
+      schedulerRunning = true;
+      scheduler = Executors.newSingleThreadScheduledExecutor();
+      scheduler.scheduleAtFixedRate(new Runnable() {
+        @Override
+        public void run() {
+          rollSecret();
+        }
+      }, initialDelay, period, TimeUnit.MILLISECONDS);
+    }
+  }
+
+  @Override
+  public synchronized void destroy() {
+    if (!isDestroyed) {
+      isDestroyed = true;
+      if (scheduler != null) {
+        scheduler.shutdown();
+      }
+      schedulerRunning = false;
+      super.destroy();
+    }
+  }
+
+  /**
+   * Rolls the secret.  It is called automatically at the rollover interval.
+   */
+  protected synchronized void rollSecret() {
+    if (!isDestroyed) {
+      LOG.debug("rolling secret");
+      byte[] newSecret = generateNewSecret();
+      secrets = new byte[][]{newSecret, secrets[0]};
+    }
+  }
+
+  /**
+   * Subclasses should implement this to return a new secret.  It will be called
+   * automatically at the secret rollover interval. It should never return null.
+   * @return a new secret
+   */
+  protected abstract byte[] generateNewSecret();
+
+  @Override
+  public byte[] getCurrentSecret() {
+    return secrets[0];
+  }
+
+  @Override
+  public byte[][] getAllSecrets() {
+    return secrets;
+  }
+}
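
As the class comment says, a subclass only has to supply generateNewSecret(); the base class stores the current and previous secret and rolls them at the token-validity interval. A minimal illustrative subclass, not part of this patch:

    import java.security.SecureRandom;
    import org.apache.hadoop.security.authentication.util.RolloverSignerSecretProvider;

    // Hypothetical provider that rolls a 256-bit random secret.
    public class SecureRandomSignerSecretProvider
        extends RolloverSignerSecretProvider {

      private final SecureRandom random = new SecureRandom();

      @Override
      protected byte[] generateNewSecret() {
        byte[] secret = new byte[32];
        random.nextBytes(secret);
        return secret;
      }
    }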

+ 32 - 14
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java

@@ -24,18 +24,19 @@ import java.security.NoSuchAlgorithmException;
 public class Signer {
   private static final String SIGNATURE = "&s=";
 
-  private byte[] secret;
+  private SignerSecretProvider secretProvider;
 
   /**
-   * Creates a Signer instance using the specified secret.
+   * Creates a Signer instance using the specified SignerSecretProvider.  The
+   * SignerSecretProvider should already be initialized.
    *
-   * @param secret secret to use for creating the digest.
+   * @param secretProvider The SignerSecretProvider to use
    */
-  public Signer(byte[] secret) {
-    if (secret == null) {
-      throw new IllegalArgumentException("secret cannot be NULL");
+  public Signer(SignerSecretProvider secretProvider) {
+    if (secretProvider == null) {
+      throw new IllegalArgumentException("secretProvider cannot be NULL");
     }
-    this.secret = secret.clone();
+    this.secretProvider = secretProvider;
   }
 
   /**
@@ -47,11 +48,12 @@ public class Signer {
    *
    * @return the signed string.
    */
-  public String sign(String str) {
+  public synchronized String sign(String str) {
     if (str == null || str.length() == 0) {
       throw new IllegalArgumentException("NULL or empty string to sign");
     }
-    String signature = computeSignature(str);
+    byte[] secret = secretProvider.getCurrentSecret();
+    String signature = computeSignature(secret, str);
     return str + SIGNATURE + signature;
   }
 
@@ -71,21 +73,19 @@ public class Signer {
     }
     String originalSignature = signedStr.substring(index + SIGNATURE.length());
     String rawValue = signedStr.substring(0, index);
-    String currentSignature = computeSignature(rawValue);
-    if (!originalSignature.equals(currentSignature)) {
-      throw new SignerException("Invalid signature");
-    }
+    checkSignatures(rawValue, originalSignature);
     return rawValue;
   }
 
   /**
    * Returns the signature of a string.
    *
+   * @param secret The secret to use
    * @param str string to sign.
    *
    * @return the signature for the string.
    */
-  protected String computeSignature(String str) {
+  protected String computeSignature(byte[] secret, String str) {
     try {
       MessageDigest md = MessageDigest.getInstance("SHA");
       md.update(str.getBytes());
@@ -97,4 +97,22 @@ public class Signer {
     }
   }
 
+  protected void checkSignatures(String rawValue, String originalSignature)
+      throws SignerException {
+    boolean isValid = false;
+    byte[][] secrets = secretProvider.getAllSecrets();
+    for (int i = 0; i < secrets.length; i++) {
+      byte[] secret = secrets[i];
+      if (secret != null) {
+        String currentSignature = computeSignature(secret, rawValue);
+        if (originalSignature.equals(currentSignature)) {
+          isValid = true;
+          break;
+        }
+      }
+    }
+    if (!isValid) {
+      throw new SignerException("Invalid signature");
+    }
+  }
 }
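
Putting the reworked Signer together with a provider, a sign/verify round trip might look like the sketch below; the secret string and payload are illustrative:

    import java.util.Properties;
    import org.apache.hadoop.security.authentication.util.Signer;
    import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;

    public class SignerDemo {
      public static void main(String[] args) throws Exception {
        StringSignerSecretProvider secretProvider =
            new StringSignerSecretProvider("illustrative-secret");
        secretProvider.init(new Properties(), -1);  // no-op for this provider
        Signer signer = new Signer(secretProvider);

        String signed = signer.sign("u=alice&t=simple");
        // verifyAndExtract() tries every secret from getAllSecrets() and
        // returns the original payload if one signature matches.
        System.out.println(signer.verifyAndExtract(signed));
      }
    }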

+ 62 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerSecretProvider.java

@@ -0,0 +1,62 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.util.Properties;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The SignerSecretProvider is an abstract way to provide a secret to be used
+ * by the Signer so that we can have different implementations that potentially
+ * do more complicated things in the backend.
+ * See the RolloverSignerSecretProvider class for an implementation that
+ * supports rolling over the secret at a regular interval.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Private
+public abstract class SignerSecretProvider {
+
+  /**
+   * Initialize the SignerSecretProvider
+   * @param config filter configuration
+   * @param tokenValidity The amount of time a token is valid for
+   * @throws Exception
+   */
+  public abstract void init(Properties config, long tokenValidity)
+      throws Exception;
+
+  /**
+   * Will be called on shutdown; subclasses should perform any cleanup here.
+   */
+  public void destroy() {}
+
+  /**
+   * Returns the current secret to be used by the Signer for signing new
+   * cookies.  This should never return null.
+   * <p>
+   * Callers should be careful not to modify the returned value.
+   * @return the current secret
+   */
+  public abstract byte[] getCurrentSecret();
+
+  /**
+   * Returns all secrets that a cookie could have been signed with and that are
+   * still valid; this should include the secret returned by getCurrentSecret().
+   * <p>
+   * Callers should be careful not to modify the returned value.
+   * @return the secrets
+   */
+  public abstract byte[][] getAllSecrets();
+}

+ 49 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java

@@ -0,0 +1,49 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.util.Properties;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A SignerSecretProvider that simply creates a secret based on a given String.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Private
+public class StringSignerSecretProvider extends SignerSecretProvider {
+
+  private byte[] secret;
+  private byte[][] secrets;
+
+  public StringSignerSecretProvider(String secretStr) {
+    secret = secretStr.getBytes();
+    secrets = new byte[][]{secret};
+  }
+
+  @Override
+  public void init(Properties config, long tokenValidity) throws Exception {
+    // do nothing
+  }
+
+  @Override
+  public byte[] getCurrentSecret() {
+    return secret;
+  }
+
+  @Override
+  public byte[][] getAllSecrets() {
+    return secrets;
+  }
+}

+ 0 - 5
hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm

@@ -18,8 +18,6 @@
 
 Hadoop Auth, Java HTTP SPNEGO ${project.version} - Building It
 
-  \[ {{{./index.html}Go Back}} \]
-
 * Requirements
 
   * Java 6+
@@ -70,6 +68,3 @@ $ mvn package -Pdocs
 
   The generated documentation is available at
   <<<hadoop-auth/target/site/>>>.
-
-  \[ {{{./index.html}Go Back}} \]
-

+ 0 - 4
hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm

@@ -20,8 +20,6 @@
 Hadoop Auth, Java HTTP SPNEGO ${project.version} - Server Side
 Configuration
 
-  \[ {{{./index.html}Go Back}} \]
-
 * Server Side Configuration Setup
 
   The AuthenticationFilter filter is Hadoop Auth's server side component.
@@ -241,5 +239,3 @@ Configuration
     ...
 </web-app>
 +---+
-
-  \[ {{{./index.html}Go Back}} \]

+ 0 - 4
hadoop-common-project/hadoop-auth/src/site/apt/Examples.apt.vm

@@ -18,8 +18,6 @@
 
 Hadoop Auth, Java HTTP SPNEGO ${project.version} - Examples
 
-  \[ {{{./index.html}Go Back}} \]
-
 * Accessing a Hadoop Auth protected URL Using a browser
 
   <<IMPORTANT:>> The browser must support HTTP Kerberos SPNEGO. For example,
@@ -133,5 +131,3 @@ You are: user[tucu] principal[tucu@LOCALHOST]
 ....
 
 +---+
-
-  \[ {{{./index.html}Go Back}} \]

+ 8 - 30
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java

@@ -33,36 +33,6 @@ public class TestAuthenticatedURL {
     token = new AuthenticatedURL.Token("foo");
     Assert.assertTrue(token.isSet());
     Assert.assertEquals("foo", token.toString());
-
-    AuthenticatedURL.Token token1 = new AuthenticatedURL.Token();
-    AuthenticatedURL.Token token2 = new AuthenticatedURL.Token();
-    Assert.assertEquals(token1.hashCode(), token2.hashCode());
-    Assert.assertTrue(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token();
-    token2 = new AuthenticatedURL.Token("foo");
-    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
-    Assert.assertFalse(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token("foo");
-    token2 = new AuthenticatedURL.Token();
-    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
-    Assert.assertFalse(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token("foo");
-    token2 = new AuthenticatedURL.Token("foo");
-    Assert.assertEquals(token1.hashCode(), token2.hashCode());
-    Assert.assertTrue(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token("bar");
-    token2 = new AuthenticatedURL.Token("foo");
-    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
-    Assert.assertFalse(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token("foo");
-    token2 = new AuthenticatedURL.Token("bar");
-    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
-    Assert.assertFalse(token1.equals(token2));
   }
 
   @Test
@@ -137,4 +107,12 @@ public class TestAuthenticatedURL {
     Mockito.verify(connConf).configure(Mockito.<HttpURLConnection>any());
   }
 
+  @Test
+  public void testGetAuthenticator() throws Exception {
+    Authenticator authenticator = Mockito.mock(Authenticator.class);
+
+    AuthenticatedURL aURL = new AuthenticatedURL(authenticator);
+    Assert.assertEquals(authenticator, aURL.getAuthenticator());
+  }
+
 }

+ 143 - 22
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java

@@ -23,6 +23,7 @@ import java.util.Vector;
 
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
@@ -33,6 +34,8 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.util.Signer;
+import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
+import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -74,6 +77,8 @@ public class TestAuthenticationFilter {
       Assert.fail();
     } catch (ServletException ex) {
       // Expected
+      Assert.assertEquals("Authentication type must be specified: simple|kerberos|<class>", 
+          ex.getMessage());
     } catch (Exception ex) {
       Assert.fail();
     } finally {
@@ -155,9 +160,14 @@ public class TestAuthenticationFilter {
       Mockito.when(config.getInitParameterNames()).thenReturn(
         new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                  AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       Assert.assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
       Assert.assertTrue(filter.isRandomSecret());
+      Assert.assertFalse(filter.isCustomSignerSecretProvider());
       Assert.assertNull(filter.getCookieDomain());
       Assert.assertNull(filter.getCookiePath());
       Assert.assertEquals(TOKEN_VALIDITY_SEC, filter.getValidity());
@@ -165,6 +175,26 @@ public class TestAuthenticationFilter {
       filter.destroy();
     }
 
+    // string secret
+    filter = new AuthenticationFilter();
+    try {
+      FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
+      Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
+      Mockito.when(config.getInitParameterNames()).thenReturn(
+        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
+      filter.init(config);
+      Assert.assertFalse(filter.isRandomSecret());
+      Assert.assertFalse(filter.isCustomSignerSecretProvider());
+    } finally {
+      filter.destroy();
+    }
+
     // custom secret
     filter = new AuthenticationFilter();
     try {
@@ -174,8 +204,26 @@ public class TestAuthenticationFilter {
       Mockito.when(config.getInitParameterNames()).thenReturn(
         new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                  AuthenticationFilter.SIGNATURE_SECRET)).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(
+            new SignerSecretProvider() {
+              @Override
+              public void init(Properties config, long tokenValidity) {
+              }
+              @Override
+              public byte[] getCurrentSecret() {
+                return null;
+              }
+              @Override
+              public byte[][] getAllSecrets() {
+                return null;
+              }
+            });
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       Assert.assertFalse(filter.isRandomSecret());
+      Assert.assertTrue(filter.isCustomSignerSecretProvider());
     } finally {
       filter.destroy();
     }
@@ -191,6 +239,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                  AuthenticationFilter.COOKIE_DOMAIN,
                                  AuthenticationFilter.COOKIE_PATH)).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       Assert.assertEquals(".foo.com", filter.getCookieDomain());
       Assert.assertEquals("/bar", filter.getCookiePath());
@@ -211,6 +263,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       Assert.assertTrue(DummyAuthenticationHandler.init);
     } finally {
@@ -233,6 +289,31 @@ public class TestAuthenticationFilter {
       filter.destroy();
     }
   }
+  
+  @Test
+  public void testInitCaseSensitivity() throws Exception {
+    // minimal configuration & simple auth handler (Pseudo)
+    AuthenticationFilter filter = new AuthenticationFilter();
+    try {
+      FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("SimPle");
+      Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn(
+          (new Long(TOKEN_VALIDITY_SEC)).toString());
+      Mockito.when(config.getInitParameterNames()).thenReturn(
+          new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+              AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
+
+      filter.init(config);
+      Assert.assertEquals(PseudoAuthenticationHandler.class, 
+          filter.getAuthenticationHandler().getClass());
+    } finally {
+      filter.destroy();
+    }
+  }
 
   @Test
   public void testGetRequestURL() throws Exception {
@@ -247,6 +328,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -274,11 +359,15 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
 
       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer("secret".getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
       String tokenSigned = signer.sign(token.toString());
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -307,12 +396,16 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
 
       AuthenticationToken token =
           new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer("secret".getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
       String tokenSigned = signer.sign(token.toString());
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -348,11 +441,15 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer("secret".getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
       String tokenSigned = signer.sign(token.toString());
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -386,6 +483,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -435,6 +536,10 @@ public class TestAuthenticationFilter {
             AuthenticationFilter.AUTH_TOKEN_VALIDITY,
             AuthenticationFilter.SIGNATURE_SECRET, "management.operation" +
             ".return", "expired.token")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
 
     if (withDomainPath) {
       Mockito.when(config.getInitParameter(AuthenticationFilter
@@ -488,7 +593,7 @@ public class TestAuthenticationFilter {
         Mockito.verify(chain).doFilter(Mockito.any(ServletRequest.class),
                 Mockito.any(ServletResponse.class));
 
-        Signer signer = new Signer("secret".getBytes());
+        Signer signer = new Signer(new StringSignerSecretProvider("secret"));
         String value = signer.verifyAndExtract(v);
         AuthenticationToken token = AuthenticationToken.parse(value);
         assertThat(token.getExpires(), not(0L));
@@ -508,21 +613,17 @@ public class TestAuthenticationFilter {
 
   private static void parseCookieMap(String cookieHeader, HashMap<String,
           String> cookieMap) {
-    for (String pair : cookieHeader.split(";")) {
-      String p = pair.trim();
-      int idx = p.indexOf('=');
-      final String k, v;
-      if (idx == -1) {
-        k = p;
-        v = null;
-      } else if (idx == p.length()) {
-        k = p.substring(0, idx - 1);
-        v = null;
-      } else {
-        k = p.substring(0, idx);
-        v = p.substring(idx + 1);
+    List<HttpCookie> cookies = HttpCookie.parse(cookieHeader);
+    for (HttpCookie cookie : cookies) {
+      if (AuthenticatedURL.AUTH_COOKIE.equals(cookie.getName())) {
+        cookieMap.put(cookie.getName(), cookie.getValue());
+        if (cookie.getPath() != null) {
+          cookieMap.put("Path", cookie.getPath());
+        }
+        if (cookie.getDomain() != null) {
+          cookieMap.put("Domain", cookie.getDomain());
+        }
       }
-      cookieMap.put(k, v);
     }
   }
 
@@ -559,6 +660,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -566,7 +671,7 @@ public class TestAuthenticationFilter {
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "t");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer("secret".getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
       String tokenSigned = signer.sign(token.toString());
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -609,6 +714,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -672,6 +781,10 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -679,7 +792,7 @@ public class TestAuthenticationFilter {
 
       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer(secret.getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider(secret));
       String tokenSigned = signer.sign(token.toString());
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -739,6 +852,10 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -746,7 +863,7 @@ public class TestAuthenticationFilter {
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer(secret.getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider(secret));
       String tokenSigned = signer.sign(token.toString());
 
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -774,6 +891,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -793,7 +914,7 @@ public class TestAuthenticationFilter {
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "t");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer("secret".getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
       String tokenSigned = signer.sign(token.toString());
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
       Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
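
The ServletContext mocking added throughout these tests corresponds to the new
externalized-secret support: when the SIGNATURE_PROVIDER_ATTRIBUTE attribute is
null the filter falls back to a random or configured string secret, and when it
holds a SignerSecretProvider the filter uses it and isCustomSignerSecretProvider()
reports true. Here is a sketch of how an application could register such a
provider, assuming the SIGNATURE_PROVIDER_ATTRIBUTE constant is publicly
accessible; wiring it through a ServletContextListener is this sketch's choice,
not something shown in the patch.

  import javax.servlet.ServletContextEvent;
  import javax.servlet.ServletContextListener;

  import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
  import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
  import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;

  // Sketch: registers a custom SignerSecretProvider before AuthenticationFilter
  // initializes.  The listener-based wiring is an assumption of this sketch.
  public class SecretProviderInitializer implements ServletContextListener {

    @Override
    public void contextInitialized(ServletContextEvent sce) {
      // Any SignerSecretProvider implementation could be supplied here.
      SignerSecretProvider provider = new StringSignerSecretProvider("secret");
      sce.getServletContext().setAttribute(
          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE, provider);
    }

    @Override
    public void contextDestroyed(ServletContextEvent sce) {
      // no cleanup needed for this sketch
    }
  }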

+ 69 - 2
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java

@@ -18,6 +18,7 @@ import org.apache.hadoop.security.authentication.KerberosTestUtils;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.ietf.jgss.GSSContext;
@@ -30,10 +31,18 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.ietf.jgss.Oid;
 
+import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosPrincipal;
+import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+
 import java.io.File;
+import java.security.Principal;
+import java.util.Arrays;
+import java.util.List;
 import java.util.Properties;
+import java.util.Set;
 import java.util.concurrent.Callable;
 
 public class TestKerberosAuthenticationHandler
@@ -110,8 +119,65 @@ public class TestKerberosAuthenticationHandler
 
   @Test(timeout=60000)
   public void testInit() throws Exception {
-    Assert.assertEquals(KerberosTestUtils.getServerPrincipal(), handler.getPrincipal());
     Assert.assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab());
+    Set<KerberosPrincipal> principals = handler.getPrincipals();
+    Principal expectedPrincipal =
+        new KerberosPrincipal(KerberosTestUtils.getServerPrincipal());
+    Assert.assertTrue(principals.contains(expectedPrincipal));
+    Assert.assertEquals(1, principals.size());
+  }
+
+  // dynamic configuration of HTTP principals
+  @Test(timeout=60000)
+  public void testDynamicPrincipalDiscovery() throws Exception {
+    String[] keytabUsers = new String[]{
+        "HTTP/host1", "HTTP/host2", "HTTP2/host1", "XHTTP/host"
+    };
+    String keytab = KerberosTestUtils.getKeytabFile();
+    getKdc().createPrincipal(new File(keytab), keytabUsers);
+
+    // destroy handler created in setUp()
+    handler.destroy();
+    Properties props = new Properties();
+    props.setProperty(KerberosAuthenticationHandler.KEYTAB, keytab);
+    props.setProperty(KerberosAuthenticationHandler.PRINCIPAL, "*");
+    handler = getNewAuthenticationHandler();
+    handler.init(props);
+
+    Assert.assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab());    
+    
+    Set<KerberosPrincipal> loginPrincipals = handler.getPrincipals();
+    for (String user : keytabUsers) {
+      Principal principal = new KerberosPrincipal(
+          user + "@" + KerberosTestUtils.getRealm());
+      boolean expected = user.startsWith("HTTP/");
+      Assert.assertEquals("checking for "+user, expected, 
+          loginPrincipals.contains(principal));
+    }
+  }
+
+  // dynamic configuration of HTTP principals
+  @Test(timeout=60000)
+  public void testDynamicPrincipalDiscoveryMissingPrincipals() throws Exception {
+    String[] keytabUsers = new String[]{"hdfs/localhost"};
+    String keytab = KerberosTestUtils.getKeytabFile();
+    getKdc().createPrincipal(new File(keytab), keytabUsers);
+
+    // destroy handler created in setUp()
+    handler.destroy();
+    Properties props = new Properties();
+    props.setProperty(KerberosAuthenticationHandler.KEYTAB, keytab);
+    props.setProperty(KerberosAuthenticationHandler.PRINCIPAL, "*");
+    handler = getNewAuthenticationHandler();
+    try {
+      handler.init(props);
+      Assert.fail("init should have failed");
+    } catch (ServletException ex) {
+      Assert.assertEquals("Principals do not exist in the keytab",
+          ex.getCause().getMessage());
+    } catch (Throwable t) {
+      Assert.fail("wrong exception: "+t);
+    }
   }
 
   @Test(timeout=60000)
@@ -190,7 +256,8 @@ public class TestKerberosAuthenticationHandler
 
     Mockito.when(request.getHeader(KerberosAuthenticator.AUTHORIZATION))
       .thenReturn(KerberosAuthenticator.NEGOTIATE + " " + token);
-
+    Mockito.when(request.getServerName()).thenReturn("localhost");
+    
     AuthenticationToken authToken = handler.authenticate(request, response);
 
     if (authToken != null) {
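
The two new tests above exercise dynamic principal discovery: with the
principal property set to "*", the handler logs in every HTTP/* principal found
in the keytab and fails init() when none exist. A minimal sketch of that
configuration follows; it assumes the handler's no-arg constructor and, like the
test, same-package access to its accessors, and the keytab path is a
placeholder.

  // Sketch only: assumes it lives in
  // org.apache.hadoop.security.authentication.server, like the test above.
  import java.util.Properties;
  import java.util.Set;
  import javax.security.auth.kerberos.KerberosPrincipal;

  public class WildcardPrincipalSketch {
    public static void main(String[] args) throws Exception {
      Properties props = new Properties();
      // Placeholder path; init() fails if the keytab has no HTTP/* entries.
      props.setProperty(KerberosAuthenticationHandler.KEYTAB,
          "/etc/security/keytabs/spnego.service.keytab");
      props.setProperty(KerberosAuthenticationHandler.PRINCIPAL, "*");

      KerberosAuthenticationHandler handler = new KerberosAuthenticationHandler();
      handler.init(props);
      try {
        // One KerberosPrincipal per HTTP/<host> entry in the keytab.
        Set<KerberosPrincipal> principals = handler.getPrincipals();
        for (KerberosPrincipal principal : principals) {
          System.out.println(principal.getName());
        }
      } finally {
        handler.destroy();
      }
    }
  }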

+ 16 - 0
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java

@@ -91,6 +91,22 @@ public class TestKerberosName {
     checkBadTranslation("root/joe@FOO.COM");
   }
 
+  @Test
+  public void testToLowerCase() throws Exception {
+    String rules =
+        "RULE:[1:$1]/L\n" +
+        "RULE:[2:$1]/L\n" +
+        "RULE:[2:$1;$2](^.*;admin$)s/;admin$///L\n" +
+        "RULE:[2:$1;$2](^.*;guest$)s/;guest$//g/L\n" +
+        "DEFAULT";
+    KerberosName.setRules(rules);
+    KerberosName.printRules();
+    checkTranslation("Joe@FOO.COM", "joe");
+    checkTranslation("Joe/root@FOO.COM", "joe");
+    checkTranslation("Joe/admin@FOO.COM", "joe");
+    checkTranslation("Joe/guestguest@FOO.COM", "joe");
+  }
+
   @After
   public void clear() {
     System.clearProperty("java.security.krb5.realm");

+ 108 - 2
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java

@@ -16,13 +16,39 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
-import org.junit.Assert;
-
+import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Pattern;
 
+import org.apache.directory.server.kerberos.shared.keytab.Keytab;
+import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry;
+import org.apache.directory.shared.kerberos.KerberosTime;
+import org.apache.directory.shared.kerberos.codec.types.EncryptionType;
+import org.apache.directory.shared.kerberos.components.EncryptionKey;
+import org.junit.After;
+import org.junit.Assert;
 import org.junit.Test;
 
 public class TestKerberosUtil {
+  static String testKeytab = "test.keytab";
+  static String[] testPrincipals = new String[]{
+      "HTTP@testRealm",
+      "test/testhost@testRealm",
+      "HTTP/testhost@testRealm",
+      "HTTP1/testhost@testRealm",
+      "HTTP/testhostanother@testRealm"
+  };
+
+  @After
+  public void deleteKeytab() {
+    File keytabFile = new File(testKeytab);
+    if (keytabFile.exists()){
+      keytabFile.delete();
+    }
+  }
 
   @Test
   public void testGetServerPrincipal() throws IOException {
@@ -51,4 +77,84 @@ public class TestKerberosUtil {
         service + "/" + testHost.toLowerCase(),
         KerberosUtil.getServicePrincipal(service, testHost.toLowerCase()));
   }
+  
+  @Test
+  public void testGetPrincipalNamesMissingKeytab() {
+    try {
+      KerberosUtil.getPrincipalNames(testKeytab);
+      Assert.fail("Exception should have been thrown");
+    } catch (IOException e) {
+      //expects exception
+    }
+  }
+
+  @Test
+  public void testGetPrincipalNamesMissingPattern() throws IOException {
+    createKeyTab(testKeytab, new String[]{"test/testhost@testRealm"});
+    try {
+      KerberosUtil.getPrincipalNames(testKeytab, null);
+      Assert.fail("Exception should have been thrown");
+    } catch (Exception e) {
+      //expects exception
+    }
+  }
+
+  @Test
+  public void testGetPrincipalNamesFromKeytab() throws IOException {
+    createKeyTab(testKeytab, testPrincipals); 
+    // read all principals in the keytab file
+    String[] principals = KerberosUtil.getPrincipalNames(testKeytab);
+    Assert.assertNotNull("principals cannot be null", principals);
+    
+    int expectedSize = 0;
+    List<String> principalList = Arrays.asList(principals);
+    for (String principal : testPrincipals) {
+      Assert.assertTrue("missing principal "+principal,
+          principalList.contains(principal));
+      expectedSize++;
+    }
+    Assert.assertEquals(expectedSize, principals.length);
+  }
+  
+  @Test
+  public void testGetPrincipalNamesFromKeytabWithPattern() throws IOException {
+    createKeyTab(testKeytab, testPrincipals); 
+    // read the keytab file
+    // look for principals with HTTP as the first part
+    Pattern httpPattern = Pattern.compile("HTTP/.*");
+    String[] httpPrincipals =
+        KerberosUtil.getPrincipalNames(testKeytab, httpPattern);
+    Assert.assertNotNull("principals cannot be null", httpPrincipals);
+    
+    int expectedSize = 0;
+    List<String> httpPrincipalList = Arrays.asList(httpPrincipals);
+    for (String principal : testPrincipals) {
+      if (httpPattern.matcher(principal).matches()) {
+        Assert.assertTrue("missing principal "+principal,
+            httpPrincipalList.contains(principal));
+        expectedSize++;
+      }
+    }
+    Assert.assertEquals(expectedSize, httpPrincipals.length);
+  }
+  
+  private void createKeyTab(String fileName, String[] principalNames)
+      throws IOException {
+    //create a test keytab file
+    List<KeytabEntry> lstEntries = new ArrayList<KeytabEntry>();
+    for (String principal : principalNames){
+      // create 3 versions of the key to ensure methods don't return
+      // duplicate principals
+      for (int kvno=1; kvno <= 3; kvno++) {
+        EncryptionKey key = new EncryptionKey(
+            EncryptionType.UNKNOWN, "samplekey1".getBytes(), kvno);
+        KeytabEntry keytabEntry = new KeytabEntry(
+            principal, 1 , new KerberosTime(), (byte) 1, key);
+        lstEntries.add(keytabEntry);      
+      }
+    }
+    Keytab keytab = Keytab.getInstance();
+    keytab.setEntries(lstEntries);
+    keytab.write(new File(testKeytab));
+  }
 }
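
The new tests above cover the KerberosUtil.getPrincipalNames() helpers, which
read a keytab and return the distinct principal names, optionally filtered by a
regex. A short usage sketch, assuming both overloads are publicly accessible;
the keytab path is a placeholder.

  import java.io.IOException;
  import java.util.regex.Pattern;

  import org.apache.hadoop.security.authentication.util.KerberosUtil;

  public class KeytabPrincipalsSketch {
    public static void main(String[] args) throws IOException {
      String keytab = "/etc/security/keytabs/spnego.service.keytab";  // placeholder

      // All distinct principal names stored in the keytab.
      String[] all = KerberosUtil.getPrincipalNames(keytab);
      for (String name : all) {
        System.out.println(name);
      }

      // Only HTTP/<host> principals, as used for SPNEGO wildcard discovery.
      String[] http =
          KerberosUtil.getPrincipalNames(keytab, Pattern.compile("HTTP/.*"));
      System.out.println(http.length + " HTTP principal(s)");
    }
  }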

+ 63 - 0
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java

@@ -0,0 +1,63 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.util.Random;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestRandomSignerSecretProvider {
+
+  @Test
+  public void testGetAndRollSecrets() throws Exception {
+    long rolloverFrequency = 15 * 1000; // rollover every 15 sec
+    // use the same seed so we can predict the RNG
+    long seed = System.currentTimeMillis();
+    Random rand = new Random(seed);
+    byte[] secret1 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secret2 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secret3 = Long.toString(rand.nextLong()).getBytes();
+    RandomSignerSecretProvider secretProvider =
+        new RandomSignerSecretProvider(seed);
+    try {
+      secretProvider.init(null, rolloverFrequency);
+
+      byte[] currentSecret = secretProvider.getCurrentSecret();
+      byte[][] allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret1, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret1, allSecrets[0]);
+      Assert.assertNull(allSecrets[1]);
+      Thread.sleep(rolloverFrequency + 2000);
+
+      currentSecret = secretProvider.getCurrentSecret();
+      allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret2, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret2, allSecrets[0]);
+      Assert.assertArrayEquals(secret1, allSecrets[1]);
+      Thread.sleep(rolloverFrequency + 2000);
+
+      currentSecret = secretProvider.getCurrentSecret();
+      allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret3, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret3, allSecrets[0]);
+      Assert.assertArrayEquals(secret2, allSecrets[1]);
+      Thread.sleep(rolloverFrequency + 2000);
+    } finally {
+      secretProvider.destroy();
+    }
+  }
+}

+ 79 - 0
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRolloverSignerSecretProvider.java

@@ -0,0 +1,79 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestRolloverSignerSecretProvider {
+
+  @Test
+  public void testGetAndRollSecrets() throws Exception {
+    long rolloverFrequency = 15 * 1000; // rollover every 15 sec
+    byte[] secret1 = "doctor".getBytes();
+    byte[] secret2 = "who".getBytes();
+    byte[] secret3 = "tardis".getBytes();
+    TRolloverSignerSecretProvider secretProvider =
+        new TRolloverSignerSecretProvider(
+            new byte[][]{secret1, secret2, secret3});
+    try {
+      secretProvider.init(null, rolloverFrequency);
+
+      byte[] currentSecret = secretProvider.getCurrentSecret();
+      byte[][] allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret1, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret1, allSecrets[0]);
+      Assert.assertNull(allSecrets[1]);
+      Thread.sleep(rolloverFrequency + 2000);
+
+      currentSecret = secretProvider.getCurrentSecret();
+      allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret2, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret2, allSecrets[0]);
+      Assert.assertArrayEquals(secret1, allSecrets[1]);
+      Thread.sleep(rolloverFrequency + 2000);
+
+      currentSecret = secretProvider.getCurrentSecret();
+      allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret3, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret3, allSecrets[0]);
+      Assert.assertArrayEquals(secret2, allSecrets[1]);
+      Thread.sleep(rolloverFrequency + 2000);
+    } finally {
+      secretProvider.destroy();
+    }
+  }
+
+  class TRolloverSignerSecretProvider extends RolloverSignerSecretProvider {
+
+    private byte[][] newSecretSequence;
+    private int newSecretSequenceIndex;
+
+    public TRolloverSignerSecretProvider(byte[][] newSecretSequence)
+        throws Exception {
+      super();
+      this.newSecretSequence = newSecretSequence;
+      this.newSecretSequenceIndex = 0;
+    }
+
+    @Override
+    protected byte[] generateNewSecret() {
+      return newSecretSequence[newSecretSequenceIndex++];
+    }
+
+  }
+}
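
RolloverSignerSecretProvider only asks subclasses for generateNewSecret() and
drives the rollover schedule itself, which is what the timed sleeps in the test
above rely on. A sketch of a concrete provider under that assumption follows;
generating the secret with SecureRandom and the 32-byte size are this sketch's
choices, not the actual RandomSignerSecretProvider implementation.

  import java.security.SecureRandom;

  import org.apache.hadoop.security.authentication.util.RolloverSignerSecretProvider;

  public class SecureRandomSecretProvider extends RolloverSignerSecretProvider {

    private final SecureRandom random = new SecureRandom();

    public SecureRandomSecretProvider() throws Exception {
      super();  // the test's subclass also declares throws Exception
    }

    @Override
    protected byte[] generateNewSecret() {
      byte[] secret = new byte[32];  // 256-bit secret; size is this sketch's choice
      random.nextBytes(secret);
      return secret;
    }
  }

init(config, tokenValidity) and destroy() come from the base class, so such a
provider is driven exactly as in the tests above.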

+ 69 - 16
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java

@@ -13,24 +13,15 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
+import java.util.Properties;
 import org.junit.Assert;
 import org.junit.Test;
 
 public class TestSigner {
 
-  @Test
-  public void testNoSecret() throws Exception {
-    try {
-      new Signer(null);
-      Assert.fail();
-    }
-    catch (IllegalArgumentException ex) {
-    }
-  }
-
   @Test
   public void testNullAndEmptyString() throws Exception {
-    Signer signer = new Signer("secret".getBytes());
+    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
     try {
       signer.sign(null);
       Assert.fail();
@@ -51,17 +42,17 @@ public class TestSigner {
 
   @Test
   public void testSignature() throws Exception {
-    Signer signer = new Signer("secret".getBytes());
+    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
     String s1 = signer.sign("ok");
     String s2 = signer.sign("ok");
     String s3 = signer.sign("wrong");
     Assert.assertEquals(s1, s2);
-    Assert.assertNotSame(s1, s3);
+    Assert.assertNotEquals(s1, s3);
   }
 
   @Test
   public void testVerify() throws Exception {
-    Signer signer = new Signer("secret".getBytes());
+    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
     String t = "test";
     String s = signer.sign(t);
     String e = signer.verifyAndExtract(s);
@@ -70,7 +61,7 @@ public class TestSigner {
 
   @Test
   public void testInvalidSignedText() throws Exception {
-    Signer signer = new Signer("secret".getBytes());
+    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
     try {
       signer.verifyAndExtract("test");
       Assert.fail();
@@ -83,7 +74,7 @@ public class TestSigner {
 
   @Test
   public void testTampering() throws Exception {
-    Signer signer = new Signer("secret".getBytes());
+    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
     String t = "test";
     String s = signer.sign(t);
     s += "x";
@@ -96,4 +87,66 @@ public class TestSigner {
       Assert.fail();
     }
   }
+
+  @Test
+  public void testMultipleSecrets() throws Exception {
+    TestSignerSecretProvider secretProvider = new TestSignerSecretProvider();
+    Signer signer = new Signer(secretProvider);
+    secretProvider.setCurrentSecret("secretB");
+    String t1 = "test";
+    String s1 = signer.sign(t1);
+    String e1 = signer.verifyAndExtract(s1);
+    Assert.assertEquals(t1, e1);
+    secretProvider.setPreviousSecret("secretA");
+    String t2 = "test";
+    String s2 = signer.sign(t2);
+    String e2 = signer.verifyAndExtract(s2);
+    Assert.assertEquals(t2, e2);
+    Assert.assertEquals(s1, s2); //check it is still signing with the current secret
+    secretProvider.setCurrentSecret("secretC");
+    secretProvider.setPreviousSecret("secretB");
+    String t3 = "test";
+    String s3 = signer.sign(t3);
+    String e3 = signer.verifyAndExtract(s3);
+    Assert.assertEquals(t3, e3);
+    Assert.assertNotEquals(s1, s3); //check it is no longer signing with the old secret
+    String e1b = signer.verifyAndExtract(s1);
+    Assert.assertEquals(t1, e1b); // previous secret still valid
+    secretProvider.setCurrentSecret("secretD");
+    secretProvider.setPreviousSecret("secretC");
+    try {
+      signer.verifyAndExtract(s1);  // previous secret no longer valid
+      Assert.fail();
+    } catch (SignerException ex) {
+      // Expected
+    }
+  }
+
+  class TestSignerSecretProvider extends SignerSecretProvider {
+
+    private byte[] currentSecret;
+    private byte[] previousSecret;
+
+    @Override
+    public void init(Properties config, long tokenValidity) {
+    }
+
+    @Override
+    public byte[] getCurrentSecret() {
+      return currentSecret;
+    }
+
+    @Override
+    public byte[][] getAllSecrets() {
+      return new byte[][]{currentSecret, previousSecret};
+    }
+
+    public void setCurrentSecret(String secretStr) {
+      currentSecret = secretStr.getBytes();
+    }
+
+    public void setPreviousSecret(String previousSecretStr) {
+      previousSecret = previousSecretStr.getBytes();
+    }
+  }
 }

+ 33 - 0
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestStringSignerSecretProvider.java

@@ -0,0 +1,33 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestStringSignerSecretProvider {
+
+  @Test
+  public void testGetSecrets() throws Exception {
+    String secretStr = "secret";
+    StringSignerSecretProvider secretProvider
+        = new StringSignerSecretProvider(secretStr);
+    secretProvider.init(null, -1);
+    byte[] secretBytes = secretStr.getBytes();
+    Assert.assertArrayEquals(secretBytes, secretProvider.getCurrentSecret());
+    byte[][] allSecrets = secretProvider.getAllSecrets();
+    Assert.assertEquals(1, allSecrets.length);
+    Assert.assertArrayEquals(secretBytes, allSecrets[0]);
+  }
+}

+ 599 - 16
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -7,8 +7,27 @@ Trunk (Unreleased)
     HADOOP-8124. Remove the deprecated FSDataOutputStream constructor,
     FSDataOutputStream.sync() and Syncable.sync().  (szetszwo)
 
+    HADOOP-10474 Move o.a.h.record to hadoop-streaming. (wheat9)
+
+    HADOOP-9902. Shell script rewrite (aw)
+
   NEW FEATURES
+
+    HADOOP-10433. Key Management Server based on KeyProvider API. (tucu)
+
+    HADOOP-9629. Support Windows Azure Storage - Blob as a file system in Hadoop.
+    (Dexter Bradshaw, Mostafa Elhemali, Xi Fang, Johannes Klein, David Lao,
+    Mike Liddell, Chuan Liu, Lengning Liu, Ivan Mitic, Michael Rys,
+    Alexander Stojanovic, Brian Swan, and Min Wei via cnauroth)
+
+    HADOOP-10728. Metrics system for Windows Azure Storage Filesystem.
+    (Dexter Bradshaw, Mostafa Elhemali, Xi Fang, Johannes Klein, David Lao,
+    Mike Liddell, Chuan Liu, Lengning Liu, Ivan Mitic, Michael Rys,
+    Alexander Stojanovich, Brian Swan, and Min Wei via cnauroth)
     
+    HADOOP-10719. Add generateEncryptedKey and decryptEncryptedKey 
+    methods to KeyProvider. (asuresh via tucu)
+
   IMPROVEMENTS
 
     HADOOP-8017. Configure hadoop-main pom to get rid of M2E plugin execution
@@ -19,12 +38,8 @@ Trunk (Unreleased)
 
     HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
 
-    HADOOP-7664. Remove warmings when overriding final parameter configuration
-    if the override value is same as the final parameter value.
-    (Ravi Prakash via suresh)
-
-    HADOOP-8078. Add capability to turn on security in unit tests. (Jaimin Jetly
-    via jitendra)
+    HADOOP-8078. Add capability to turn on security in unit tests. (Jaimin 
+    Jetly via jitendra)
 
     HADOOP-7757. Test file reference count is at least 3x actual value (Jon
     Eagles via bobby)
@@ -121,9 +136,6 @@ Trunk (Unreleased)
     HADOOP-10342. Add a new method to UGI to use a Kerberos login subject to
     build a new UGI. (Larry McCay via omalley)
 
-    HADOOP-9968. Makes ProxyUsers to work with NetGroups (Benoy Antony via 
-    ddas)
-
     HADOOP-10237. JavaKeyStoreProvider needs to set keystore permissions 
     correctly. (Larry McCay via omalley)
 
@@ -141,6 +153,61 @@ Trunk (Unreleased)
     HADOOP-10430. KeyProvider Metadata should have an optional description, 
     there should be a method to retrieve the metadata from all keys. (tucu)
 
+    HADOOP-10534. KeyProvider getKeysMetadata should take a list of names 
+    rather than returning all keys. (omalley)
+
+    HADOOP-10563. Remove the dependency of jsp in trunk. (wheat9)
+
+    HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)
+
+    HADOOP-10696. Add optional attributes to KeyProvider Options and Metadata. 
+    (tucu)
+
+    HADOOP-10695. KMSClientProvider should respect a configurable timeout. 
+    (yoderme via tucu)
+
+    HADOOP-10757. KeyProvider KeyVersion should provide the key name. 
+    (asuresh via tucu)
+
+    HADOOP-10769. Create KeyProvider extension to handle delegation tokens.
+    (Arun Suresh via atm)
+
+    HADOOP-10812. Delegate KeyProviderExtension#toString to underlying
+    KeyProvider. (wang)
+
+    HADOOP-10736. Add key attributes to the key shell. (Mike Yoder via wang)
+
+    HADOOP-10824. Refactor KMSACLs to avoid locking. (Benoy Antony via umamahesh)
+
+    HADOOP-10841. EncryptedKeyVersion should have a key name property. 
+    (asuresh via tucu)
+
+    HADOOP-10842. CryptoExtension generateEncryptedKey method should 
+    receive the key name. (asuresh via tucu)
+
+    HADOOP-10750. KMSKeyProviderCache should be in hadoop-common. 
+    (asuresh via tucu)
+
+    HADOOP-10720. KMS: Implement generateEncryptedKey and decryptEncryptedKey
+    in the REST API. (asuresh via tucu)
+
+    HADOOP-10891. Add EncryptedKeyVersion factory method to
+    KeyProviderCryptoExtension. (wang)
+
+    HADOOP-10756. KMS audit log should consolidate successful similar requests. 
+    (asuresh via tucu)
+
+    HADOOP-10793. KeyShell args should use single-dash style. (wang)
+
+    HADOOP-10936. Change default KeyProvider bitlength to 128. (wang)
+
+    HADOOP-10224. JavaKeyStoreProvider has to protect against corrupting 
+    underlying store. (asuresh via tucu)
+
+    HADOOP-10770. KMS add delegation token support. (tucu)
+
+    HADOOP-10698. KMS, add proxyuser support. (tucu)
+
   BUG FIXES
 
     HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -289,9 +356,6 @@ Trunk (Unreleased)
     HADOOP-9394. Port findHangingTest.sh from HBase to Hadoop. (Andrew Wang
     via atm)
 
-    HADOOP-9099. NetUtils.normalizeHostName fails on domains where 
-    UnknownHost resolves to an IP address. (Ivan Mitic via suresh)
-
     HADOOP-9431 TestSecurityUtil#testLocalHostNameForNullOrWild on systems where hostname
     contains capital letters  (Chris Nauroth via sanjay)
 
@@ -319,22 +383,264 @@ Trunk (Unreleased)
 
     HADOOP-10431. Change visibility of KeyStore.Options getter methods to public. (tucu)
 
+    HADOOP-10583. bin/hadoop key throws NPE with no args and assorted other fixups. (clamb via tucu)
+
+    HADOOP-10586. KeyShell doesn't allow setting Options via CLI. (clamb via tucu)
+
+    HADOOP-10625. Trim configuration names when putting/getting them
+    to properties. (Wangda Tan via xgong)
+
+    HADOOP-10645. TestKMS fails because race condition writing acl files. (tucu)
+
+    HADOOP-10611. KMS, keyVersion name should not be assumed to be 
+    keyName@versionNumber. (tucu)
+
+    HADOOP-10717. HttpServer2 should load jsp DTD from local jars instead of
+    going remote. (Dapeng Sun via wheat9)
+
+    HADOOP-10689. InputStream is not closed in
+    AzureNativeFileSystemStore#retrieve(). (Chen He via cnauroth)
+
+    HADOOP-10690. Lack of synchronization on access to InputStream in
+    NativeAzureFileSystem#NativeAzureFsInputStream#close().
+    (Chen He via cnauroth)
+
+    HADOOP-10831. UserProvider is not thread safe. (Benoy Antony via umamahesh)
+
+    HADOOP-10834. Typo in CredentialShell usage. (Benoy Antony via umamahesh)
+
+    HADOOP-10816. KeyShell returns -1 on error to the shell, should be 1.
+    (Mike Yoder via wang)
+
+    HADOOP-10840. Fix OutOfMemoryError caused by metrics system in Azure File
+    System. (Shanyu Zhao via cnauroth)
+
+    HADOOP-10826. Iteration on KeyProviderFactory.serviceLoader is 
+    thread-unsafe. (benoyantony via tucu)
+
+    HADOOP-10881. Clarify usage of encryption and encrypted encryption
+    key in KeyProviderCryptoExtension. (wang)
+
+    HADOOP-10920. site plugin couldn't parse hadoop-kms index.apt.vm.
+    (Akira Ajisaka via wang)
+
+    HADOOP-10925. Compilation fails in native link0 function on Windows.
+    (cnauroth)
+
+    HADOOP-10939. Fix TestKeyProviderFactory testcases to use default 128 bit
+    length keys. (Arun Suresh via wang)
+
+    HADOOP-10862. Miscellaneous trivial corrections to KMS classes. 
+    (asuresh via tucu)
+
+    HADOOP-10967. Improve DefaultCryptoExtension#generateEncryptedKey 
+    performance. (hitliuyi via tucu)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
 
     HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
 
-Release 2.5.0 - UNRELEASED
+Release 2.6.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
-    HADOOP-10474 Move o.a.h.record to hadoop-streaming. (wheat9)
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HADOOP-10808. Remove unused native code for munlock. (cnauroth)
+
+    HADOOP-10815. Implement Windows equivalent of mlock. (cnauroth)
+
+    HADOOP-7664. Remove warnings when overriding final parameter configuration
+    if the override value is same as the final parameter value.
+    (Ravi Prakash via suresh)
+
+    HADOOP-10673. Update rpc metrics when the call throws an exception. (Ming Ma
+    via jing9)
+
+    HADOOP-10845. Add common tests for ACLs in combination with viewfs.
+    (Stephen Chu via cnauroth)
+
+    HADOOP-10839. Add unregisterSource() to MetricsSystem API.
+    (Shanyu Zhao via cnauroth)
+
+    HADOOP-10607. Create an API to separate credentials/password storage
+    from applications (Larry McCay via omalley)
+
+    HADOOP-10732. Fix locking in credential update. (Ted Yu via omalley)
+
+    HADOOP-10733. Fix potential null dereference in CredShell. (Ted Yu via
+    omalley)
+
+    HADOOP-10610. Upgrade S3n s3.fs.buffer.dir to support multi directories.
+    (Ted Malaska via atm)
+
+    HADOOP-10817. ProxyUsers configuration should support configurable 
+    prefixes. (tucu)
+
+    HADOOP-10755. Support negative caching of user-group mapping.
+    (Lei Xu via wang)
+
+    HADOOP-10855. Allow Text to be read with a known Length. (todd)
+
+    HADOOP-10887. Add XAttrs to ViewFs and make XAttrs + ViewFileSystem
+    internal dir behavior consistent. (Stephen Chu via wang)
+
+    HADOOP-10882. Move DirectBufferPool into common util. (todd)
+
+    HADOOP-8069. Enable TCP_NODELAY by default for IPC. (Todd Lipcon via
+    Arpit Agarwal)
+
+    HADOOP-10902. Deletion of directories with snapshots will not output
+    reason for trash move failure. (Stephen Chu via wang)
+
+    HADOOP-10900. CredentialShell args should use single-dash style. (wang)
+
+    HADOOP-10903. Enhance hadoop classpath command to expand wildcards or write
+    classpath into jar manifest. (cnauroth)
+
+    HADOOP-10791. AuthenticationFilter should support externalizing the 
+    secret for signing and provide rotation support. (rkanter via tucu)
+
+    HADOOP-10771. Refactor HTTP delegation support out of httpfs to common. 
+    (tucu)
+
+    HADOOP-10835. Implement HTTP proxyuser support in HTTP authentication 
+    client/server libraries. (tucu)
+
+    HADOOP-10820. Throw an exception in GenericOptionsParser when passed
+    an empty Path. (Alex Holmes and Zhihai Xu via wang)
+
+    HADOOP-10281. Create a scheduler, which assigns schedulables a priority
+    level. (Chris Li via Arpit Agarwal)
+
+    HADOOP-8944. Shell command fs -count should include human readable option 
+    (Jonathan Allen via aw)
+
+    HADOOP-10231. Add some components in Native Libraries document (Akira 
+    AJISAKA via aw)
+
+    HADOOP-10650. Add ability to specify a reverse ACL (black list) of users
+    and groups. (Benoy Antony via Arpit Agarwal)
+
+    HADOOP-10335. An ip whitelist based implementation to resolve Sasl
+    properties per connection. (Benoy Antony via Arpit Agarwal)
+
+    HADOOP-10975. org.apache.hadoop.util.DataChecksum should support calculating
+    checksums in native code (James Thomas via Colin Patrick McCabe)
+
+  OPTIMIZATIONS
+
+    HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
+
+  BUG FIXES
+
+    HADOOP-10781. Unportable getgrouplist() usage breaks FreeBSD (Dmitry
+    Sivachenko via Colin Patrick McCabe)
+
+    HADOOP-10507. FsShell setfacl can throw ArrayIndexOutOfBoundsException when
+    no perm is specified. (Stephen Chu and Sathish Gurram via cnauroth)
+
+    HADOOP-10780. hadoop_user_info_alloc fails on FreeBSD due to incorrect
+    sysconf use (Dmitry Sivachenko via Colin Patrick McCabe)
+
+    HADOOP-10810. Clean up native code compilation warnings. (cnauroth)
+
+    HADOOP-9921. daemon scripts should remove pid file on stop call after stop
+    or process is found not running ( vinayakumarb )
+
+    HADOOP-10591.  Compression codecs must use pooled direct buffers or
+    deallocate direct buffers when stream is closed (cmccabe)
+
+    HADOOP-10857.  Native Libraries Guide doesn't mention a dependency on
+    openssl-development package (ozawa via cmccabe)
+
+    HADOOP-10866. RawLocalFileSystem fails to read symlink targets via the stat
+    command when the format of the stat command uses non-curly quotes (yzhang
+    via cmccabe)
+
+    HADOOP-10830. Missing lock in JavaKeyStoreProvider.createCredentialEntry.
+    (Benoy Antony via umamahesh)
+
+    HADOOP-10928. Incorrect usage on `hadoop credential list`.
+    (Josh Elser via wang)
+
+    HADOOP-10927. Fix CredentialShell help behavior and error codes.
+    (Josh Elser via wang)
+
+    HADOOP-10937. Need to set version name correctly before decrypting EEK.
+    (Arun Suresh via wang)
+
+    HADOOP-10918. JMXJsonServlet fails when used within Tomcat. (tucu)
+
+    HADOOP-10933. FileBasedKeyStoresFactory Should use Configuration.getPassword 
+    for SSL Passwords. (lmccay via tucu)
+
+    HADOOP-10759. Remove hardcoded JAVA_HEAP_MAX. (Sam Liu via Eric Yang)
+
+    HADOOP-10905. LdapGroupsMapping Should use configuration.getPassword for SSL
+    and LDAP Passwords. (lmccay via brandonli)
+
+    HADOOP-10931 compile error on tools/hadoop-openstack (xukun via stevel)
+
+    HADOOP-10929. Typo in Configuration.getPasswordFromCredentialProviders
+    (lmccay via brandonli)
+
+    HADOOP-10402. Configuration.getValByRegex does not substitute for
+    variables. (Robert Kanter via kasha)
+
+    HADOOP-10851. NetgroupCache does not remove group memberships. (Benoy
+    Antony via Arpit Agarwal)
+
+    HADOOP-10962. Flags for posix_fadvise are not valid in some architectures
+    (David Villegas via Colin Patrick McCabe)
+
+    HADOOP-10966. Hadoop Common native compilation broken in windows.
+    (David Villegas via Arpit Agarwal)
+
+    HADOOP-10843. TestGridmixRecord unit tests failure on PowerPC (Jinghui Wang
+    via Colin Patrick McCabe)
+
+    HADOOP-10121. Fix javadoc spelling for HadoopArchives#writeTopLevelDirs
+    (Akira AJISAKA via aw)
+
+    HADOOP-10964. Small fix for NetworkTopologyWithNodeGroup#sortByDistance.
+    (Yi Liu via wang)
+
+    HADOOP-10059. RPC authentication and authorization metrics overflow to
+    negative values on busy clusters (Tsuyoshi OZAWA and Akira AJISAKA
+    via jlowe)
+
+    HADOOP-10973. Native Libraries Guide contains format error. (Peter Klavins
+    via Arpit Agarwal)
+
+    HADOOP-10972. Native Libraries Guide contains mis-spelt build line (Peter
+    Klavins via aw)
+
+    HADOOP-10873. Fix dead link in Configuration javadoc (Akira AJISAKA 
+    via aw)
+
+    HADOOP-10968. hadoop native build fails to detect java_libarch on
+    ppc64le (Dinar Valeev via Colin Patrick McCabe)
+
+Release 2.5.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
 
   NEW FEATURES
 
     HADOOP-10498. Add support for proxy server. (daryn)
 
+    HADOOP-9704. Write metrics sink plugin for Hadoop/Graphite (Chu Tong, Alex Newman and Babak Behzad via raviprak)
+
+    HADOOP-8943. Support multiple group mapping providers. (Kai Zheng via brandonli)
+
+    HADOOP-9361 Strictly define the expected behavior of filesystem APIs and
+    write tests to verify compliance (stevel)
+
   IMPROVEMENTS
 
     HADOOP-10451. Remove unused field and imports from SaslRpcServer.
@@ -348,10 +654,116 @@ Release 2.5.0 - UNRELEASED
 
     HADOOP-10104. Update jackson to 1.9.13 (Akira Ajisaka via stevel)
 
-    HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)
+    HADOOP-10503. Move junit up to v 4.11. (cnauroth)
+
+    HADOOP-10535. Make the retry numbers in ActiveStandbyElector configurable.
+    (jing9)
+
+    HADOOP-10322. Add ability to read principal names from a keytab.
+    (Benoy Antony and Daryn Sharp via kihwal)
+
+    HADOOP-10549. MAX_SUBST and varPat should be final in Configuration.java.
+    (Gera Shegalov via cnauroth)
+
+    HADOOP-10471. Reduce the visibility of constants in ProxyUsers.
+    (Benoy Antony via wheat9)
+
+    HADOOP-10556. Add toLowerCase support to auth_to_local rules 
+    for service name. (tucu)
+
+    HADOOP-10467. Enable proxyuser specification to support list of users in
+    addition to list of groups (Benoy Antony via Arpit Agarwal)
+
+    HADOOP-10158. SPNEGO should work with multiple interfaces/SPNs.
+    (daryn via kihwal)
+
+    HADOOP-10566. Refactor proxyservers out of ProxyUsers.
+    (Benoy Antony via suresh)
+
+    HADOOP-10572. Example NFS mount command must pass noacl as it isn't
+    supported by the server yet. (Harsh J via brandonli)
+
+    HADOOP-10609. .gitignore should ignore .orig and .rej files. (kasha)
+
+    HADOOP-10614. CBZip2InputStream is not threadsafe (Xiangrui Meng via
+    Sandy Ryza)
+
+    HADOOP-10618. Remove SingleNodeSetup.apt.vm. (Akira Ajisaka via
+    Arpit Agarwal)
+
+    HADOOP-9968. Makes ProxyUsers to work with NetGroups (Benoy Antony via 
+    ddas)
+
+    HADOOP-10448. Support pluggable mechanism to specify proxy user settings.
+    (Benoy Antony via Arpit Agarwal)
+
+    HADOOP-9555. HA functionality that uses ZooKeeper may experience inadvertent
+    TCP RST and miss session expiration event due to bug in client connection
+    management. (cnauroth)
+
+    HADOOP-10376. Refactor refresh*Protocols into a single generic
+    refreshConfigProtocol. (Chris Li via Arpit Agarwal)
+
+    HADOOP-6350. Documenting Hadoop metrics. (Akira Ajisaka via Arpit Agarwal)
+
+    HADOOP-10691. Improve the readability of 'hadoop fs -help'.
+    (Lei Xu via wang)
+
+    HADOOP-10688. Expose thread-level FileSystem StatisticsData (Sandy Ryza)
+
+    HADOOP-10657. Have RetryInvocationHandler log failover attempt at INFO
+    level. (Ming Ma via jing9)
+
+    HADOOP-10666. Remove Copyright /d/d/d/d Apache Software Foundation from
+    the source files license header. (Henry Saputra via wang)
+
+    HADOOP-10557. FsShell -cp -pa option for preserving extended ACLs.
+    (Akira Ajisaka via cnauroth)
+
+    HADOOP-10279. Create multiplexer, a requirement for the fair queue.
+    (Chris Li via Arpit Agarwal)
+
+    HADOOP-10659. Refactor AccessControlList to reuse utility functions
+    and to improve performance. (Benoy Antony via Arpit Agarwal)
+
+    HADOOP-10665. Make Hadoop Authentication Handler loading case-insensitive
+    (Benoy Antony via vinayakumarb)
+
+    HADOOP-10652. Refactor Proxyusers to use AccessControlList. (Benoy
+    Antony via Arpit Agarwal)
+
+    HADOOP-10747. Support configurable retries on SASL connection failures in
+    RPC client. (cnauroth)
+
+    HADOOP-10754. Reenable several HA ZooKeeper-related tests on Windows.
+    (cnauroth)
+
+    HADOOP-10565. Support IP ranges (CIDR) in proxyuser.hosts. (Benoy Antony
+    via Arpit Agarwal)
+
+    HADOOP-10649. Allow overriding the default ACL for service authorization
+    (Benoy Antony via Arpit Agarwal)
+
+    HADOOP-10767. Clean up unused code in Ls shell command. (cnauroth)
+
+    HADOOP-9651 Filesystems to throw FileAlreadyExistsException in
+    createFile(path, overwrite=false) when the file exists (stevel)
+    
+    HADOOP-9495 Define behaviour of Seekable.seek(), write tests,
+    fix all hadoop implementations for compliance
+
+    HADOOP-10312 Shell.ExitCodeException to have more useful toString (stevel)
+
+    HADOOP-10782. Fix typo in DataChecksum class. (Jingguo Yao via suresh)
+
+    HADOOP-10896. Update compatibility doc to capture visibility of 
+    un-annotated classes/ methods. (kasha)
 
   OPTIMIZATIONS
 
+    HADOOP-10674. Improve PureJavaCrc32 performance and use java.util.zip.CRC32
+    for Java 7 and above. (szetszwo)
+
   BUG FIXES 
 
     HADOOP-10378. Typo in help printed by hdfs dfs -help.
@@ -393,7 +805,167 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10499. Remove unused parameter from ProxyUsers.authorize().
     (Benoy Antony via cnauroth)
 
-Release 2.4.1 - UNRELEASED
+    HADOOP-9919. Update hadoop-metrics2.properties examples to Yarn.
+    (Akira AJISAKA via suresh)
+
+    HADOOP-10526. Chance for Stream leakage in CompressorStream. (Rushabh 
+    Shah via kihwal)
+
+    HADOOP-10251. Both NameNodes could be in STANDBY State if SNN network is unstable
+    (Vinayakumar B via umamahesh)
+
+    HADOOP-10531. hadoop-config.sh - bug in --hosts argument.
+    (Sebastien Barrier via wang)
+
+    HADOOP-10539. Provide backward compatibility for ProxyUsers.authorize()
+    call. (Benoy Antony via cnauroth)
+
+    HADOOP-10540. Datanode upgrade in Windows fails with hardlink error.
+    (Chris Nauroth and Arpit Agarwal)
+
+    HADOOP-10508. RefreshCallQueue fails when authorization is enabled.
+    (Chris Li via wheat9)
+
+    HADOOP-10547. Give SaslPropertiesResolver.getDefaultProperties() public
+    scope. (Benoy Antony via Arpit Agarwal)
+
+    HADOOP-10543. RemoteException's unwrapRemoteException method failed for
+    PathIOException. (Yongjun Zhang via atm)
+
+    HADOOP-10568. Add s3 server-side encryption. (David S. Wang via atm)
+
+    HADOOP-10541. InputStream in MiniKdc#initKDCServer for minikdc.ldiff is not
+    closed. (Swarnim Kulkarni via cnauroth)
+
+    HADOOP-10517. InputStream is not closed in two methods of JarFinder.
+    (Ted Yu via cnauroth)
+
+    HADOOP-10581. TestUserGroupInformation#testGetServerSideGroups fails
+    because groups stored in Set and ArrayList are compared. 
+    (Mit Desai via kihwal)
+
+    HADOOP-10585. Retry policies ignore interrupted exceptions (Daryn Sharp via
+    jeagles)
+
+    HADOOP-10401. ShellBasedUnixGroupsMapping#getGroups does not always return
+    primary group first (Akira AJISAKA via Colin Patrick McCabe)
+
+    HADOOP-10489. UserGroupInformation#getTokens and UserGroupInformation
+    #addToken can lead to ConcurrentModificationException (Robert Kanter via atm)
+
+    HADOOP-10602. Documentation has broken "Go Back" hyperlinks.
+    (Akira AJISAKA via cnauroth)
+
+    HADOOP-10639. FileBasedKeyStoresFactory initialization is not using default
+    for SSL_REQUIRE_CLIENT_CERT_KEY. (tucu)
+
+    HADOOP-10638. Updating hadoop-daemon.sh to work as expected when nfs is
+    started as a privileged user. (Manikandan Narayanaswamy via atm)
+
+    HADOOP-10630. Possible race condition in RetryInvocationHandler. (jing9)
+
+    HADOOP-10658. SSLFactory expects truststores being configured. (tucu via atm)
+
+    HADOOP-10647. String Format Exception in SwiftNativeFileSystemStore.java.
+    (Gene Kim via stevel)
+
+    HADOOP-9099. NetUtils.normalizeHostName fails on domains where
+    UnknownHost resolves to an IP address. (Ivan Mitic via suresh)
+
+    HADOOP-10664. TestNetUtils.testNormalizeHostName fails. (atm)
+
+    HADOOP-10656. The password keystore file is not picked by LDAP group mapping
+    (brandonli)
+
+    HADOOP-10622. Shell.runCommand can deadlock (Gera Shegalov via jlowe)
+
+    HADOOP-10686. Writables are not always configured. 
+    (Abraham Elmahrek via kasha)
+
+    HADOOP-10678. SecurityUtil has unnecessary synchronization on collection
+    used for only tests. (Benoy Antony via cnauroth)
+
+    HADOOP-10683. Users authenticated with KERBEROS are recorded as being
+    authenticated with SIMPLE. (Benoy Antony via cnauroth)
+
+    HADOOP-10702. KerberosAuthenticationHandler does not log the principal names
+    correctly. (Benoy Antony via cnauroth)
+
+    HADOOP-10699. Fix build native library on mac osx (Binglin Chang via
+    jlowe)
+
+    HADOOP-10660. GraphiteSink should implement Closeable (Chen He and Ted Yu via raviprak)
+
+    HADOOP-10716. Cannot use more than 1 har filesystem.
+    (Rushabh Shah via cnauroth)
+
+    HADOOP-9559. When metrics system is restarted MBean names get incorrectly
+    flagged as dupes. (Mostafa Elhemali and Mike Liddell via cnauroth)
+
+    HADOOP-10746. TestSocketIOWithTimeout#testSocketIOWithTimeout fails on
+    Power PC. (Jinghui Wang via Arpit Agarwal)
+
+    HADOOP-9705. FsShell cp -p does not preserve directory attributes.
+    (Akira AJISAKA via cnauroth)
+
+    HADOOP-10739. Renaming a file into a directory containing the same
+    filename results in a confusing I/O error (chang li via jlowe)
+
+    HADOOP-10533 S3 input stream NPEs in MapReduce join (stevel)
+
+    HADOOP-10419 BufferedFSInputStream NPEs on getPos() on a closed stream
+    (stevel)
+
+    HADOOP-10801 dead link in site.xml (Akira AJISAKA via stevel)
+
+    HADOOP-10590. ServiceAuthorizationManager is not threadsafe. (Benoy Antony via vinayakumarb)
+
+    HADOOP-10711. Cleanup some extra dependencies from hadoop-auth. (rkanter via tucu)
+
+    HADOOP-10479. Fix new findbugs warnings in hadoop-minikdc.
+    (Swarnim Kulkarni via wheat9)
+
+    HADOOP-10715. Remove public GraphiteSink#setWriter (Babak Behzad via raviprak)
+
+    HADOOP-10710. hadoop.auth cookie is not properly constructed according to 
+    RFC2109. (Juan Yu via tucu)
+
+    HADOOP-10864. Tool documentation is broken. (Akira Ajisaka
+    via Arpit Agarwal)
+
+    HADOOP-10872. TestPathData fails intermittently with "Mkdirs failed
+    to create d1". (Yongjun Zhang via Arpit Agarwal)
+
+    HADOOP-10890. TestDFVariations.testMount fails intermittently. (Yongjun
+    Zhang via Arpit Agarwal)
+
+    HADOOP-10894. Fix dead link in ToolRunner documentation. (Akira Ajisaka
+    via Arpit Agarwal)
+
+    HADOOP-10910. Increase findbugs maxHeap size. (wang)
+
+  BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS
+
+    HADOOP-10520. Extended attributes definition and FileSystem APIs for
+    extended attributes. (Yi Liu via wang)
+
+    HADOOP-10546. Javadoc and other small fixes for extended attributes in
+    hadoop-common. (Charles Lamb via wang)
+
+    HADOOP-10521. FsShell commands for extended attributes. (Yi Liu via wang)
+
+    HADOOP-10548. Improve FsShell xattr error handling and other fixes. (Charles Lamb via umamahesh)
+
+    HADOOP-10567. Shift XAttr value encoding code out for reuse. (Yi Liu via umamahesh)
+
+    HADOOP-10621. Remove CRLF for xattr value base64 encoding for better display. (Yi Liu via umamahesh)
+
+    HADOOP-10575. Small fixes for XAttrCommands and test. (Yi Liu via umamahesh)
+
+    HADOOP-10561. Copy command with preserve option should handle Xattrs.
+    (Yi Liu via cnauroth)
+
+Release 2.4.1 - 2014-06-23 
 
   INCOMPATIBLE CHANGES
 
@@ -420,6 +992,17 @@ Release 2.4.1 - UNRELEASED
     HADOOP-10490. TestMapFile and TestBloomMapFile leak file descriptors.
     (cnauroth)
 
+    HADOOP-10522. JniBasedUnixGroupMapping mishandles errors. (kihwal)
+
+    HADOOP-10527. Fix incorrect return code and allow more retries on EINTR.
+    (kihwal)
+
+    HADOOP-10612. NFS failed to refresh the user group id mapping table (brandonli)
+
+    HADOOP-10562. Namenode exits on exception without printing stack trace
+    in AbstractDelegationTokenSecretManager. (Suresh Srinivas via Arpit
+    Agarwal)
+
 Release 2.4.0 - 2014-04-07 
 
   INCOMPATIBLE CHANGES

+ 10 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -287,6 +287,10 @@
       <!-- protobuf generated code -->
       <Class name="~org\.apache\.hadoop\.ipc\.proto\.RefreshCallQueueProtocolProtos.*"/>
     </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ipc\.proto\.GenericRefreshProtocolProtos.*"/>
+    </Match>
 
     <!--
        Manually checked, misses child thread manually syncing on parent's intrinsic lock.
@@ -357,4 +361,10 @@
        <Bug code="NP" />
      </Match>
 
+  <Match>
+    <Class name="org.apache.hadoop.crypto.key.kms.KMSClientProvider"/>
+    <Method name="validateResponse"/>
+    <Bug pattern="REC_CATCH_EXCEPTION"/>
+  </Match>
+
 </FindBugsFilter>

+ 17 - 20
hadoop-common-project/hadoop-common/pom.xml

@@ -103,6 +103,11 @@
       <artifactId>jetty-util</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>javax.servlet.jsp</groupId>
+      <artifactId>jsp-api</artifactId>
+      <scope>runtime</scope>
+    </dependency>
     <dependency>
       <groupId>com.sun.jersey</groupId>
       <artifactId>jersey-core</artifactId>
@@ -119,26 +124,6 @@
       <artifactId>jersey-server</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>tomcat</groupId>
-      <artifactId>jasper-compiler</artifactId>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>tomcat</groupId>
-      <artifactId>jasper-runtime</artifactId>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>javax.servlet.jsp</groupId>
-      <artifactId>jsp-api</artifactId>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>commons-el</groupId>
-      <artifactId>commons-el</artifactId>
-      <scope>runtime</scope>
-    </dependency>
     <dependency>
       <groupId>commons-logging</groupId>
       <artifactId>commons-logging</artifactId>
@@ -218,6 +203,17 @@
       <artifactId>hadoop-auth</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-auth</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>com.jcraft</groupId>
       <artifactId>jsch</artifactId>
@@ -338,6 +334,7 @@
                   <include>RefreshAuthorizationPolicyProtocol.proto</include>
                   <include>RefreshUserMappingsProtocol.proto</include>
                   <include>RefreshCallQueueProtocol.proto</include>
+                  <include>GenericRefreshProtocol.proto</include>
                 </includes>
               </source>
               <output>${project.build.directory}/generated-sources/java</output>

+ 6 - 0
hadoop-common-project/hadoop-common/src/JNIFlags.cmake

@@ -78,6 +78,12 @@ IF("${CMAKE_SYSTEM}" MATCHES "Linux")
         SET(_java_libarch "amd64")
     ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm")
         SET(_java_libarch "arm")
+    ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64le")
+        IF(EXISTS "${_JAVA_HOME}/jre/lib/ppc64le")
+                SET(_java_libarch "ppc64le")
+        ELSE()
+                SET(_java_libarch "ppc64")
+        ENDIF()
     ELSE()
         SET(_java_libarch ${CMAKE_SYSTEM_PROCESSOR})
     ENDIF()

+ 132 - 92
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -15,124 +15,164 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# This script runs the hadoop core commands. 
-
-bin=`which $0`
-bin=`dirname ${bin}`
-bin=`cd "$bin" > /dev/null; pwd`
- 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
-
-function print_usage(){
+function hadoop_usage()
+{
   echo "Usage: hadoop [--config confdir] COMMAND"
   echo "       where COMMAND is one of:"
-  echo "  fs                   run a generic filesystem user client"
-  echo "  version              print the version"
-  echo "  jar <jar>            run a jar file"
-  echo "  checknative [-a|-h]  check native hadoop and compression libraries availability"
-  echo "  distcp <srcurl> <desturl> copy file or directories recursively"
-  echo "  archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
+  echo "  archive -archiveName NAME -p <parent path> <src>* <dest>"
+  echo "                       create a Hadoop archive"
+  echo "  checknative [-a|-h]  check native Hadoop and compression "
+  echo "                         libraries availability"
   echo "  classpath            prints the class path needed to get the"
-  echo "                       Hadoop jar and the required libraries"
+  echo "                         Hadoop jar and the required libraries"
+  echo "  credential           interact with credential providers"
   echo "  daemonlog            get/set the log level for each daemon"
+  echo "  distch path:owner:group:permisson"
+  echo "                       distributed metadata changer"
+  echo "  distcp <srcurl> <desturl> "
+  echo "                       copy file or directories recursively"
+  echo "  fs                   run a generic filesystem user client"
+  echo "  jar <jar>            run a jar file"
+  echo "  jnipath              prints the java.library.path"
+  echo "  key                  manage keys via the KeyProvider"
+  echo "  version              print the version"
   echo " or"
   echo "  CLASSNAME            run the class named CLASSNAME"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
 }
 
+
+# This script runs the hadoop core commands.
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
+  exit 1
+fi
+
 if [ $# = 0 ]; then
-  print_usage
-  exit
+  hadoop_exit_with_usage 1
 fi
 
 COMMAND=$1
-case $COMMAND in
-  # usage flags
-  --help|-help|-h)
-    print_usage
-    exit
-    ;;
+shift
 
-  #hdfs commands
-  namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer|fetchdt|oiv|dfsgroups|portmap|nfs3)
-    echo "DEPRECATED: Use of this script to execute hdfs command is deprecated." 1>&2
-    echo "Instead use the hdfs command for it." 1>&2
-    echo "" 1>&2
-    #try to locate hdfs and if present, delegate to it.  
-    shift
-    if [ -f "${HADOOP_HDFS_HOME}"/bin/hdfs ]; then
-      exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups}  "$@"
-    elif [ -f "${HADOOP_PREFIX}"/bin/hdfs ]; then
-      exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@"
+case ${COMMAND} in
+  balancer|datanode|dfs|dfsadmin|dfsgroups|  \
+  namenode|secondarynamenode|fsck|fetchdt|oiv| \
+  portmap|nfs3)
+    hadoop_error "WARNING: Use of this script to execute ${COMMAND} is deprecated."
+    COMMAND=${COMMAND/dfsgroups/groups}
+    hadoop_error "WARNING: Attempting to execute replacement \"hdfs ${COMMAND}\" instead."
+    hadoop_error ""
+    #try to locate hdfs and if present, delegate to it.
+    if [[ -f "${HADOOP_HDFS_HOME}/bin/hdfs" ]]; then
+      # shellcheck disable=SC2086
+      exec "${HADOOP_HDFS_HOME}/bin/hdfs" \
+      --config "${HADOOP_CONF_DIR}" "${COMMAND}"  "$@"
+    elif [[ -f "${HADOOP_PREFIX}/bin/hdfs" ]]; then
+      # shellcheck disable=SC2086
+      exec "${HADOOP_PREFIX}/bin/hdfs" \
+      --config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
     else
-      echo "HADOOP_HDFS_HOME not found!"
+      hadoop_error "HADOOP_HDFS_HOME not found!"
       exit 1
     fi
-    ;;
-
+  ;;
+  
   #mapred commands for backwards compatibility
   pipes|job|queue|mrgroups|mradmin|jobtracker|tasktracker)
-    echo "DEPRECATED: Use of this script to execute mapred command is deprecated." 1>&2
-    echo "Instead use the mapred command for it." 1>&2
-    echo "" 1>&2
+    hadoop_error "WARNING: Use of this script to execute ${COMMAND} is deprecated."
+    COMMAND=${COMMAND/mrgroups/groups}
+    hadoop_error "WARNING: Attempting to execute replacement \"mapred ${COMMAND}\" instead."
+    hadoop_error ""
     #try to locate mapred and if present, delegate to it.
-    shift
-    if [ -f "${HADOOP_MAPRED_HOME}"/bin/mapred ]; then
-      exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
-    elif [ -f "${HADOOP_PREFIX}"/bin/mapred ]; then
-      exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
+    if [[ -f "${HADOOP_MAPRED_HOME}/bin/mapred" ]]; then
+      exec "${HADOOP_MAPRED_HOME}/bin/mapred" \
+      --config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
+    elif [[ -f "${HADOOP_PREFIX}/bin/mapred" ]]; then
+      exec "${HADOOP_PREFIX}/bin/mapred" \
+      --config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
     else
-      echo "HADOOP_MAPRED_HOME not found!"
+      hadoop_error "HADOOP_MAPRED_HOME not found!"
       exit 1
     fi
-    ;;
-
+  ;;
+  archive)
+    CLASS=org.apache.hadoop.tools.HadoopArchives
+    hadoop_add_classpath "${TOOL_PATH}"
+  ;;
+  checknative)
+    CLASS=org.apache.hadoop.util.NativeLibraryChecker
+  ;;
   classpath)
-    echo $CLASSPATH
-    exit
-    ;;
-
-  #core commands  
-  *)
-    # the core commands
-    if [ "$COMMAND" = "fs" ] ; then
-      CLASS=org.apache.hadoop.fs.FsShell
-    elif [ "$COMMAND" = "version" ] ; then
-      CLASS=org.apache.hadoop.util.VersionInfo
-    elif [ "$COMMAND" = "jar" ] ; then
-      CLASS=org.apache.hadoop.util.RunJar
-    elif [ "$COMMAND" = "key" ] ; then
-      CLASS=org.apache.hadoop.crypto.key.KeyShell
-    elif [ "$COMMAND" = "checknative" ] ; then
-      CLASS=org.apache.hadoop.util.NativeLibraryChecker
-    elif [ "$COMMAND" = "distcp" ] ; then
-      CLASS=org.apache.hadoop.tools.DistCp
-      CLASSPATH=${CLASSPATH}:${TOOL_PATH}
-    elif [ "$COMMAND" = "daemonlog" ] ; then
-      CLASS=org.apache.hadoop.log.LogLevel
-    elif [ "$COMMAND" = "archive" ] ; then
-      CLASS=org.apache.hadoop.tools.HadoopArchives
-      CLASSPATH=${CLASSPATH}:${TOOL_PATH}
-    elif [[ "$COMMAND" = -*  ]] ; then
-        # class and package names cannot begin with a -
-        echo "Error: No command named \`$COMMAND' was found. Perhaps you meant \`hadoop ${COMMAND#-}'"
-        exit 1
-    else
-      CLASS=$COMMAND
+    if [[ "$#" -eq 1 ]]; then
+      CLASS=org.apache.hadoop.util.Classpath
+    else   
+      hadoop_finalize
+      echo "${CLASSPATH}"
+      exit 0
     fi
-    shift
-    
-    # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
-    HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+  ;;
+  credential)
+    CLASS=org.apache.hadoop.security.alias.CredentialShell
+  ;;
+  daemonlog)
+    CLASS=org.apache.hadoop.log.LogLevel
+  ;;
+  distch)
+    CLASS=org.apache.hadoop.tools.DistCh
+    hadoop_add_classpath "${TOOL_PATH}"
+  ;;
+  distcp)
+    CLASS=org.apache.hadoop.tools.DistCp
+    hadoop_add_classpath "${TOOL_PATH}"
+  ;;
+  fs)
+    CLASS=org.apache.hadoop.fs.FsShell
+  ;;
+  jar)
+    CLASS=org.apache.hadoop.util.RunJar
+  ;;
+  jnipath)
+    hadoop_finalize
+    echo "${JAVA_LIBRARY_PATH}"
+    exit 0
+  ;;
+  key)
+    CLASS=org.apache.hadoop.crypto.key.KeyShell
+  ;;
+  version)
+    CLASS=org.apache.hadoop.util.VersionInfo
+  ;;
+  -*|hdfs)
+    hadoop_exit_with_usage 1
+  ;;
+  *)
+    CLASS="${COMMAND}"
+  ;;
+esac
 
-    #make sure security appender is turned off
-    HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
+# Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 
-    export CLASSPATH=$CLASSPATH
-    exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
-    ;;
+hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
+
+hadoop_finalize
+export CLASSPATH
+hadoop_java_exec "${COMMAND}" "${CLASS}" "$@"
 
-esac

+ 147 - 251
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

@@ -1,3 +1,5 @@
+#
+#
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
 # this work for additional information regarding copyright ownership.
@@ -13,282 +15,176 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+####
+# IMPORTANT
+####
+
+## The hadoop-config.sh tends to get executed by non-Hadoop scripts.
+## Those parts expect this script to parse/manipulate $@. In order
+## to maintain backward compatibility, this means a surprising
+## lack of functions for bits that would be much better off in
+## a function.
+##
+## In other words, yes, some bad things happen here, and unless we
+## break the rest of the ecosystem, we can't change them. :(
+
+
 # included in all the hadoop scripts with source command
 # should not be executable directly
 # also should not be passed any arguments, since we need original $*
-
-# Resolve links ($0 may be a softlink) and convert a relative path
-# to an absolute path.  NB: The -P option requires bash built-ins
-# or POSIX:2001 compliant cd and pwd.
-
-#   HADOOP_CLASSPATH Extra Java CLASSPATH entries.
 #
-#   HADOOP_USER_CLASSPATH_FIRST      When defined, the HADOOP_CLASSPATH is 
-#                                    added in the beginning of the global
-#                                    classpath. Can be defined, for example,
-#                                    by doing 
-#                                    export HADOOP_USER_CLASSPATH_FIRST=true
-#
-
-this="${BASH_SOURCE-$0}"
-common_bin=$(cd -P -- "$(dirname -- "$this")" && pwd -P)
-script="$(basename -- "$this")"
-this="$common_bin/$script"
-
-[ -f "$common_bin/hadoop-layout.sh" ] && . "$common_bin/hadoop-layout.sh"
-
-HADOOP_COMMON_DIR=${HADOOP_COMMON_DIR:-"share/hadoop/common"}
-HADOOP_COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR:-"share/hadoop/common/lib"}
-HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_COMMON_LIB_NATIVE_DIR:-"lib/native"}
-HDFS_DIR=${HDFS_DIR:-"share/hadoop/hdfs"}
-HDFS_LIB_JARS_DIR=${HDFS_LIB_JARS_DIR:-"share/hadoop/hdfs/lib"}
-YARN_DIR=${YARN_DIR:-"share/hadoop/yarn"}
-YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
-MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
-MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
-
-# the root of the Hadoop installation
-# See HADOOP-6255 for directory structure layout
-HADOOP_DEFAULT_PREFIX=$(cd -P -- "$common_bin"/.. && pwd -P)
-HADOOP_PREFIX=${HADOOP_PREFIX:-$HADOOP_DEFAULT_PREFIX}
-export HADOOP_PREFIX
-
-#check to see if the conf dir is given as an optional argument
-if [ $# -gt 1 ]
-then
-    if [ "--config" = "$1" ]
-	  then
-	      shift
-	      confdir=$1
-	      if [ ! -d "$confdir" ]; then
-                echo "Error: Cannot find configuration directory: $confdir"
-                exit 1
-             fi
-	      shift
-	      HADOOP_CONF_DIR=$confdir
-    fi
-fi
- 
-# Allow alternate conf dir location.
-if [ -e "${HADOOP_PREFIX}/conf/hadoop-env.sh" ]; then
-  DEFAULT_CONF_DIR="conf"
-else
-  DEFAULT_CONF_DIR="etc/hadoop"
-fi
-
-export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_PREFIX/$DEFAULT_CONF_DIR}"
+# after doing more config, caller should also exec finalize
+# function to finish last minute/default configs for
+# settings that might be different between daemons & interactive
 
-if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
-  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-fi
-
-# User can specify hostnames or a file where the hostnames are (not both)
-if [[ ( "$HADOOP_SLAVES" != '' ) && ( "$HADOOP_SLAVE_NAMES" != '' ) ]] ; then
-  echo \
-    "Error: Please specify one variable HADOOP_SLAVES or " \
-    "HADOOP_SLAVE_NAME and not both."
+# you must be this high to ride the ride
+if [[ -z "${BASH_VERSINFO}" ]] || [[ "${BASH_VERSINFO}" -lt 3 ]]; then
+  echo "Hadoop requires bash v3 or better. Sorry."
   exit 1
 fi
 
-# Process command line options that specify hosts or file with host
-# list
-if [ $# -gt 1 ]
-then
-    if [ "--hosts" = "$1" ]
-    then
-        shift
-        export HADOOP_SLAVES="${HADOOP_CONF_DIR}/$$1"
-        shift
-    elif [ "--hostnames" = "$1" ]
-    then
-        shift
-        export HADOOP_SLAVE_NAMES=$1
-        shift
-    fi
-fi
+# In order to get partially bootstrapped, we need to figure out where
+# we are located. Chances are good that our caller has already done
+# this work for us, but just in case...
 
-# User can specify hostnames or a file where the hostnames are (not both)
-# (same check as above but now we know it's command line options that cause
-# the problem)
-if [[ ( "$HADOOP_SLAVES" != '' ) && ( "$HADOOP_SLAVE_NAMES" != '' ) ]] ; then
-  echo \
-    "Error: Please specify one of --hosts or --hostnames options and not both."
-  exit 1
+if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
+  _hadoop_common_this="${BASH_SOURCE-$0}"
+  HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hadoop_common_this}")" >/dev/null && pwd -P)
 fi
 
-# check if net.ipv6.bindv6only is set to 1
-bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
-if [ -n "$bindv6only" ] && [ "$bindv6only" -eq "1" ] && [ "$HADOOP_ALLOW_IPV6" != "yes" ]
-then
-  echo "Error: \"net.ipv6.bindv6only\" is set to 1 - Java networking could be broken"
-  echo "For more info: http://wiki.apache.org/hadoop/HadoopIPv6"
+# get our functions defined for usage later
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh"
+else
+  echo "ERROR: Unable to exec ${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh." 1>&2
   exit 1
 fi
 
-# Newer versions of glibc use an arena memory allocator that causes virtual
-# memory usage to explode. This interacts badly with the many threads that
-# we use in Hadoop. Tune the variable down to prevent vmem explosion.
-export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
-
-# Attempt to set JAVA_HOME if it is not set
-if [[ -z $JAVA_HOME ]]; then
-  # On OSX use java_home (or /Library for older versions)
-  if [ "Darwin" == "$(uname -s)" ]; then
-    if [ -x /usr/libexec/java_home ]; then
-      export JAVA_HOME=($(/usr/libexec/java_home))
-    else
-      export JAVA_HOME=(/Library/Java/Home)
-    fi
-  fi
-
-  # Bail if we did not detect it
-  if [[ -z $JAVA_HOME ]]; then
-    echo "Error: JAVA_HOME is not set and could not be found." 1>&2
-    exit 1
-  fi
-fi
-
-JAVA=$JAVA_HOME/bin/java
-# some Java parameters
-JAVA_HEAP_MAX=-Xmx1000m 
-
-# check envvars which might override default args
-if [ "$HADOOP_HEAPSIZE" != "" ]; then
-  #echo "run with heapsize $HADOOP_HEAPSIZE"
-  JAVA_HEAP_MAX="-Xmx""$HADOOP_HEAPSIZE""m"
-  #echo $JAVA_HEAP_MAX
-fi
-
-# CLASSPATH initially contains $HADOOP_CONF_DIR
-CLASSPATH="${HADOOP_CONF_DIR}"
-
-# so that filenames w/ spaces are handled correctly in loops below
-IFS=
-
-if [ "$HADOOP_COMMON_HOME" = "" ]; then
-  if [ -d "${HADOOP_PREFIX}/$HADOOP_COMMON_DIR" ]; then
-    export HADOOP_COMMON_HOME=$HADOOP_PREFIX
-  fi
-fi
-
-# for releases, add core hadoop jar & webapps to CLASSPATH
-if [ -d "$HADOOP_COMMON_HOME/$HADOOP_COMMON_DIR/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/$HADOOP_COMMON_DIR
+# allow overrides of the above and pre-defines of the below
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh"
 fi
 
-if [ -d "$HADOOP_COMMON_HOME/$HADOOP_COMMON_LIB_JARS_DIR" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/$HADOOP_COMMON_LIB_JARS_DIR'/*'
-fi
-
-CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/$HADOOP_COMMON_DIR'/*'
-
-# default log directory & file
-if [ "$HADOOP_LOG_DIR" = "" ]; then
-  HADOOP_LOG_DIR="$HADOOP_PREFIX/logs"
-fi
-if [ "$HADOOP_LOGFILE" = "" ]; then
-  HADOOP_LOGFILE='hadoop.log'
-fi
-
-# default policy file for service-level authorization
-if [ "$HADOOP_POLICYFILE" = "" ]; then
-  HADOOP_POLICYFILE="hadoop-policy.xml"
-fi
-
-# restore ordinary behaviour
-unset IFS
-
-# setup 'java.library.path' for native-hadoop code if necessary
-
-if [ -d "${HADOOP_PREFIX}/build/native" -o -d "${HADOOP_PREFIX}/$HADOOP_COMMON_LIB_NATIVE_DIR" ]; then
-    
-  if [ -d "${HADOOP_PREFIX}/$HADOOP_COMMON_LIB_NATIVE_DIR" ]; then
-    if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-      JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_PREFIX}/$HADOOP_COMMON_LIB_NATIVE_DIR
-    else
-      JAVA_LIBRARY_PATH=${HADOOP_PREFIX}/$HADOOP_COMMON_LIB_NATIVE_DIR
-    fi
-  fi
-fi
-
-# setup a default TOOL_PATH
-TOOL_PATH="${TOOL_PATH:-$HADOOP_PREFIX/share/hadoop/tools/lib/*}"
-
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.home.dir=$HADOOP_PREFIX"
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
-if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-  HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-  export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$JAVA_LIBRARY_PATH
-fi  
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.policy.file=$HADOOP_POLICYFILE"
-
-# Disable ipv6 as it can cause issues
-HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
-
-# put hdfs in classpath if present
-if [ "$HADOOP_HDFS_HOME" = "" ]; then
-  if [ -d "${HADOOP_PREFIX}/$HDFS_DIR" ]; then
-    export HADOOP_HDFS_HOME=$HADOOP_PREFIX
-  fi
-fi
-
-if [ -d "$HADOOP_HDFS_HOME/$HDFS_DIR/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/$HDFS_DIR
-fi
+#
+# IMPORTANT! We are not executing user provided code yet!
+#
 
-if [ -d "$HADOOP_HDFS_HOME/$HDFS_LIB_JARS_DIR" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/$HDFS_LIB_JARS_DIR'/*'
-fi
+# Let's go!  Base definitions so we can move forward
+hadoop_bootstrap_init
 
-CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/$HDFS_DIR'/*'
+# let's find our conf.
+#
+# first, check and process params passed to us
+# we process this in-line so that we can directly modify $@
+# if something downstream is processing that directly,
+# we need to make sure our params have been ripped out
+# note that we handle many of them here on behalf of various utilities;
+# this keeps option parsing uniform and gives users a consistent
+# experience
+
+
+# save these off in case our caller needs them
+# shellcheck disable=SC2034
+HADOOP_USER_PARAMS="$@"
+
+HADOOP_DAEMON_MODE="default"
+
+while [[ -z "${_hadoop_common_done}" ]]; do
+  case $1 in
+    --buildpaths)
+      # shellcheck disable=SC2034
+      HADOOP_ENABLE_BUILD_PATHS=true
+      shift
+    ;;
+    --config)
+      shift
+      confdir=$1
+      shift
+      if [[ -d "${confdir}" ]]; then
+        # shellcheck disable=SC2034
+        YARN_CONF_DIR="${confdir}"
+        # shellcheck disable=SC2034
+        HADOOP_CONF_DIR="${confdir}"
+      elif [[ -z "${confdir}" ]]; then
+        hadoop_error "ERROR: No parameter provided for --config "
+        hadoop_exit_with_usage 1
+      else
+        hadoop_error "ERROR: Cannot find configuration directory \"${confdir}\""
+        hadoop_exit_with_usage 1
+      fi
+    ;;
+    --daemon)
+      shift
+      HADOOP_DAEMON_MODE=$1
+      shift
+      if [[ -z "${HADOOP_DAEMON_MODE}" || \
+        ! "${HADOOP_DAEMON_MODE}" =~ ^st(art|op|atus)$ ]]; then
+        hadoop_error "ERROR: --daemon must be followed by either \"start\", \"stop\", or \"status\"."
+        hadoop_exit_with_usage 1
+      fi
+    ;;
+    --help|-help|-h|help|--h|--\?|-\?|\?)
+      hadoop_exit_with_usage 0
+    ;;
+    --hostnames)
+      shift
+      # shellcheck disable=SC2034
+      HADOOP_SLAVE_NAMES="$1"
+      shift
+    ;;
+    --hosts)
+      shift
+      hadoop_populate_slaves_file "$1"
+      shift
+    ;;
+    *)
+      _hadoop_common_done=true
+    ;;
+  esac
+done
+
+hadoop_find_confdir
+hadoop_exec_hadoopenv
 
-# put yarn in classpath if present
-if [ "$HADOOP_YARN_HOME" = "" ]; then
-  if [ -d "${HADOOP_PREFIX}/$YARN_DIR" ]; then
-    export HADOOP_YARN_HOME=$HADOOP_PREFIX
-  fi
-fi
+#
+# IMPORTANT! User provided code is now available!
+#
 
-if [ -d "$HADOOP_YARN_HOME/$YARN_DIR/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR
-fi
+# do all the OS-specific startup bits here
+# this allows us to get a decent JAVA_HOME,
+# call crle for LD_LIBRARY_PATH, etc.
+hadoop_os_tricks
 
-if [ -d "$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR'/*'
-fi
+hadoop_java_setup
 
-CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR'/*'
+hadoop_basic_init
 
-# put mapred in classpath if present AND different from YARN
-if [ "$HADOOP_MAPRED_HOME" = "" ]; then
-  if [ -d "${HADOOP_PREFIX}/$MAPRED_DIR" ]; then
-    export HADOOP_MAPRED_HOME=$HADOOP_PREFIX
-  fi
+# inject any sub-project overrides, defaults, etc.
+if declare -F hadoop_subproject_init >/dev/null ; then
+  hadoop_subproject_init
 fi
 
-if [ "$HADOOP_MAPRED_HOME/$MAPRED_DIR" != "$HADOOP_YARN_HOME/$YARN_DIR" ] ; then
-  if [ -d "$HADOOP_MAPRED_HOME/$MAPRED_DIR/webapps" ]; then
-    CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/$MAPRED_DIR
-  fi
+# get the native libs in there pretty quick
+hadoop_add_javalibpath "${HADOOP_PREFIX}/build/native"
+hadoop_add_javalibpath "${HADOOP_PREFIX}/${HADOOP_COMMON_LIB_NATIVE_DIR}"
 
-  if [ -d "$HADOOP_MAPRED_HOME/$MAPRED_LIB_JARS_DIR" ]; then
-    CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/$MAPRED_LIB_JARS_DIR'/*'
-  fi
-
-  CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/$MAPRED_DIR'/*'
-fi
+# get the basic java class path for these subprojects
+# in as quickly as possible since other stuff
+# will definitely depend upon it.
+#
+# at some point, this will get replaced with something pluggable
+# so that these functions can sit in their projects rather than
+# common
+#
+for i in common hdfs yarn mapred
+do
+  hadoop_add_to_classpath_$i
+done
 
-# Add the user-specified CLASSPATH via HADOOP_CLASSPATH
-# Add it first or last depending on if user has
-# set env-var HADOOP_USER_CLASSPATH_FIRST
-if [ "$HADOOP_CLASSPATH" != "" ]; then
-  # Prefix it if its to be preceded
-  if [ "$HADOOP_USER_CLASSPATH_FIRST" != "" ]; then
-    CLASSPATH=${HADOOP_CLASSPATH}:${CLASSPATH}
-  else
-    CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
-  fi
+#
+# backwards compatibility. new stuff should
+# call this when they are ready
+#
+if [[ -z "${HADOOP_NEW_CONFIG}" ]]; then
+  hadoop_finalize
 fi

+ 28 - 174
hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh

@@ -15,188 +15,42 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-# Runs a Hadoop command as a daemon.
-#
-# Environment Variables
-#
-#   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
-#   HADOOP_LOG_DIR   Where log files are stored.  PWD by default.
-#   HADOOP_MASTER    host:path where hadoop code should be rsync'd from
-#   HADOOP_PID_DIR   The pid files are stored. /tmp by default.
-#   HADOOP_IDENT_STRING   A string representing this instance of hadoop. $USER by default
-#   HADOOP_NICENESS The scheduling priority for daemons. Defaults to 0.
-##
-
-usage="Usage: hadoop-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] [--script script] (start|stop) <hadoop-command> <args...>"
-
-# if no args specified, show usage
-if [ $# -le 1 ]; then
-  echo $usage
-  exit 1
-fi
-
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
-
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
-
-# get arguments
-
-#default value
-hadoopScript="$HADOOP_PREFIX"/bin/hadoop
-if [ "--script" = "$1" ]
-  then
-    shift
-    hadoopScript=$1
-    shift
-fi
-startStop=$1
-shift
-command=$1
-shift
-
-hadoop_rotate_log ()
+function hadoop_usage
 {
-    log=$1;
-    num=5;
-    if [ -n "$2" ]; then
-	num=$2
-    fi
-    if [ -f "$log" ]; then # rotate logs
-	while [ $num -gt 1 ]; do
-	    prev=`expr $num - 1`
-	    [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
-	    num=$prev
-	done
-	mv "$log" "$log.$num";
-    fi
+  echo "Usage: hadoop-daemon.sh [--config confdir] (start|stop|status) <hadoop-command> <args...>"
 }
 
-if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
-  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-fi
-
-# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
-if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  export HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
-  export HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
-  export HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
-  starting_secure_dn="true"
-fi
-
-if [ "$HADOOP_IDENT_STRING" = "" ]; then
-  export HADOOP_IDENT_STRING="$USER"
-fi
-
-
-# get log directory
-if [ "$HADOOP_LOG_DIR" = "" ]; then
-  export HADOOP_LOG_DIR="$HADOOP_PREFIX/logs"
-fi
-
-if [ ! -w "$HADOOP_LOG_DIR" ] ; then
-  mkdir -p "$HADOOP_LOG_DIR"
-  chown $HADOOP_IDENT_STRING $HADOOP_LOG_DIR
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
 fi
 
-if [ "$HADOOP_PID_DIR" = "" ]; then
-  HADOOP_PID_DIR=/tmp
+if [[ $# = 0 ]]; then
+  hadoop_exit_with_usage 1
 fi
 
-# some variables
-export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
-export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"}
-export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"}
-export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-"INFO,NullAppender"}
-log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
-pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
-HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
+daemonmode=$1
+shift
 
-# Set default scheduling priority
-if [ "$HADOOP_NICENESS" = "" ]; then
-    export HADOOP_NICENESS=0
+if [[ -z "${HADOOP_HDFS_HOME}" ]]; then
+  hdfsscript="${HADOOP_PREFIX}/bin/hdfs"
+else
+  hdfsscript="${HADOOP_HDFS_HOME}/bin/hdfs"
 fi
 
-case $startStop in
-
-  (start)
-
-    [ -w "$HADOOP_PID_DIR" ] ||  mkdir -p "$HADOOP_PID_DIR"
-
-    if [ -f $pid ]; then
-      if kill -0 `cat $pid` > /dev/null 2>&1; then
-        echo $command running as process `cat $pid`.  Stop it first.
-        exit 1
-      fi
-    fi
-
-    if [ "$HADOOP_MASTER" != "" ]; then
-      echo rsync from $HADOOP_MASTER
-      rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HADOOP_MASTER/ "$HADOOP_PREFIX"
-    fi
-
-    hadoop_rotate_log $log
-    echo starting $command, logging to $log
-    cd "$HADOOP_PREFIX"
-    case $command in
-      namenode|secondarynamenode|datanode|journalnode|dfs|dfsadmin|fsck|balancer|zkfc)
-        if [ -z "$HADOOP_HDFS_HOME" ]; then
-          hdfsScript="$HADOOP_PREFIX"/bin/hdfs
-        else
-          hdfsScript="$HADOOP_HDFS_HOME"/bin/hdfs
-        fi
-        nohup nice -n $HADOOP_NICENESS $hdfsScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
-      ;;
-      (*)
-        nohup nice -n $HADOOP_NICENESS $hadoopScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
-      ;;
-    esac
-    echo $! > $pid
-    sleep 1
-    head "$log"
-    # capture the ulimit output
-    if [ "true" = "$starting_secure_dn" ]; then
-      echo "ulimit -a for secure datanode user $HADOOP_SECURE_DN_USER" >> $log
-      # capture the ulimit info for the appropriate user
-      su --shell=/bin/bash $HADOOP_SECURE_DN_USER -c 'ulimit -a' >> $log 2>&1
-    else
-      echo "ulimit -a for user $USER" >> $log
-      ulimit -a >> $log 2>&1
-    fi
-    sleep 3;
-    if ! ps -p $! > /dev/null ; then
-      exit 1
-    fi
-    ;;
-          
-  (stop)
-
-    if [ -f $pid ]; then
-      TARGET_PID=`cat $pid`
-      if kill -0 $TARGET_PID > /dev/null 2>&1; then
-        echo stopping $command
-        kill $TARGET_PID
-        sleep $HADOOP_STOP_TIMEOUT
-        if kill -0 $TARGET_PID > /dev/null 2>&1; then
-          echo "$command did not stop gracefully after $HADOOP_STOP_TIMEOUT seconds: killing with kill -9"
-          kill -9 $TARGET_PID
-        fi
-      else
-        echo no $command to stop
-      fi
-    else
-      echo no $command to stop
-    fi
-    ;;
-
-  (*)
-    echo $usage
-    exit 1
-    ;;
-
-esac
-
+exec "$hdfsscript" --config "${HADOOP_CONF_DIR}" --daemon "${daemonmode}" "$@"
 

+ 26 - 11
hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh

@@ -18,19 +18,34 @@
 
 # Run a Hadoop command on all slave hosts.
 
-usage="Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] [start|stop] command args..."
+function hadoop_usage
+{
+  echo "Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] (start|stop|status) <hadoop-command> <args...>"
+}
+
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
 
-# if no args specified, show usage
-if [ $# -le 1 ]; then
-  echo $usage
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
   exit 1
 fi
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
-
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+if [[ $# = 0 ]]; then
+  hadoop_exit_with_usage 1
+fi
 
-exec "$bin/slaves.sh" --config $HADOOP_CONF_DIR cd "$HADOOP_PREFIX" \; "$bin/hadoop-daemon.sh" --config $HADOOP_CONF_DIR "$@"
+hadoop_connect_to_hosts "${bin}/hadoop-daemon.sh" \
+--config "${HADOOP_CONF_DIR}" "$@"

+ 1036 - 0
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

@@ -0,0 +1,1036 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function hadoop_error
+{
+  # NOTE: This function is not user replaceable.
+
+  echo "$*" 1>&2
+}
+
+function hadoop_bootstrap_init
+{
+  # NOTE: This function is not user replaceable.
+
+  # the root of the Hadoop installation
+  # See HADOOP-6255 for the expected directory structure layout
+  
+  # By now, HADOOP_LIBEXEC_DIR should have been defined upstream
+  # We can piggyback off of that to figure out where the default
+  # HADOOP_PREFIX should be.  This allows us to run without
+  # HADOOP_PREFIX ever being defined by a human! As a consequence
+  # HADOOP_LIBEXEC_DIR now becomes perhaps the single most powerful
+  # env var within Hadoop.
+  if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
+    hadoop_error "HADOOP_LIBEXEC_DIR is not defined.  Exiting."
+    exit 1
+  fi
+  HADOOP_DEFAULT_PREFIX=$(cd -P -- "${HADOOP_LIBEXEC_DIR}/.." >/dev/null && pwd -P)
+  HADOOP_PREFIX=${HADOOP_PREFIX:-$HADOOP_DEFAULT_PREFIX}
+  export HADOOP_PREFIX
+  
+  #
+  # short-cuts. vendors may redefine these as well, preferably
+  # in hadoop-layouts.sh
+  #
+  HADOOP_COMMON_DIR=${HADOOP_COMMON_DIR:-"share/hadoop/common"}
+  HADOOP_COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR:-"share/hadoop/common/lib"}
+  HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_COMMON_LIB_NATIVE_DIR:-"lib/native"}
+  HDFS_DIR=${HDFS_DIR:-"share/hadoop/hdfs"}
+  HDFS_LIB_JARS_DIR=${HDFS_LIB_JARS_DIR:-"share/hadoop/hdfs/lib"}
+  YARN_DIR=${YARN_DIR:-"share/hadoop/yarn"}
+  YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
+  MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
+  MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
+  # setup a default TOOL_PATH
+  TOOL_PATH=${TOOL_PATH:-${HADOOP_PREFIX}/share/hadoop/tools/lib/*}
+
+  export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
+
+  
+  # defaults
+  export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
+}
+
+function hadoop_find_confdir
+{
+  # NOTE: This function is not user replaceable.
+
+  # Look for the basic hadoop configuration area.
+  #
+  #
+  # An attempt at compatibility with some Hadoop 1.x
+  # installs.
+  if [[ -e "${HADOOP_PREFIX}/conf/hadoop-env.sh" ]]; then
+    DEFAULT_CONF_DIR="conf"
+  else
+    DEFAULT_CONF_DIR="etc/hadoop"
+  fi
+  export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_PREFIX}/${DEFAULT_CONF_DIR}}"
+}
+
+function hadoop_exec_hadoopenv
+{
+  # NOTE: This function is not user replaceable.
+
+  if [[ -z "${HADOOP_ENV_PROCESSED}" ]]; then
+    if [[ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]]; then
+      export HADOOP_ENV_PROCESSED=true
+      . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+    fi
+  fi
+}
+
+
+function hadoop_basic_init
+{
+  # Some of these are also set in hadoop-env.sh.
+  # we still set them here just in case hadoop-env.sh is
+  # broken in some way, set up defaults, etc.
+  #
+  # but it is important to note that if you update these
+  # you also need to update hadoop-env.sh as well!!!
+  
+  # CLASSPATH initially contains $HADOOP_CONF_DIR
+  CLASSPATH="${HADOOP_CONF_DIR}"
+  
+  if [[ -z "${HADOOP_COMMON_HOME}" ]] &&
+  [[ -d "${HADOOP_PREFIX}/${HADOOP_COMMON_DIR}" ]]; then
+    export HADOOP_COMMON_HOME="${HADOOP_PREFIX}"
+  fi
+  
+  # default policy file for service-level authorization
+  HADOOP_POLICYFILE=${HADOOP_POLICYFILE:-"hadoop-policy.xml"}
+  
+  # define HADOOP_HDFS_HOME
+  if [[ -z "${HADOOP_HDFS_HOME}" ]] &&
+  [[ -d "${HADOOP_PREFIX}/${HDFS_DIR}" ]]; then
+    export HADOOP_HDFS_HOME="${HADOOP_PREFIX}"
+  fi
+  
+  # define HADOOP_YARN_HOME
+  if [[ -z "${HADOOP_YARN_HOME}" ]] &&
+  [[ -d "${HADOOP_PREFIX}/${YARN_DIR}" ]]; then
+    export HADOOP_YARN_HOME="${HADOOP_PREFIX}"
+  fi
+  
+  # define HADOOP_MAPRED_HOME
+  if [[ -z "${HADOOP_MAPRED_HOME}" ]] &&
+  [[ -d "${HADOOP_PREFIX}/${MAPRED_DIR}" ]]; then
+    export HADOOP_MAPRED_HOME="${HADOOP_PREFIX}"
+  fi
+  
+  HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
+  HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_PREFIX}/logs"}
+  HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
+  HADOOP_NICENESS=${HADOOP_NICENESS:-0}
+  HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
+  HADOOP_PID_DIR=${HADOOP_PID_DIR:-/tmp}
+  HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,console}
+  HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_DAEMON_ROOT_LOGGER:-INFO,RFA}
+  HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}
+  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
+  HADOOP_SSH_OPTS=${HADOOP_SSH_OPTS:-"-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"}
+  HADOOP_SECURE_LOG_DIR=${HADOOP_SECURE_LOG_DIR:-${HADOOP_LOG_DIR}}
+  HADOOP_SECURE_PID_DIR=${HADOOP_SECURE_PID_DIR:-${HADOOP_PID_DIR}}
+  HADOOP_SSH_PARALLEL=${HADOOP_SSH_PARALLEL:-10}
+}
+
+function hadoop_populate_slaves_file()
+{
+  # NOTE: This function is not user replaceable.
+
+  local slavesfile=$1
+  shift
+  if [[ -f "${slavesfile}" ]]; then
+    # shellcheck disable=2034
+    HADOOP_SLAVES="${slavesfile}"
+  elif [[ -f "${HADOOP_CONF_DIR}/${slavesfile}" ]]; then
+    # shellcheck disable=2034
+    HADOOP_SLAVES="${HADOOP_CONF_DIR}/${slavesfile}"
+    # shellcheck disable=2034
+    YARN_SLAVES="${HADOOP_CONF_DIR}/${slavesfile}"
+  else
+    hadoop_error "ERROR: Cannot find hosts file \"${slavesfile}\""
+    hadoop_exit_with_usage 1
+  fi
+}
+
+function hadoop_rotate_log
+{
+  #
+  # log rotation (mainly used for .out files)
+  # Users are likely to replace this one with something
+  # that gzips or uses dates or who knows what.
+  #
+  # be aware that &1 and &2 might go through here
+  # so don't do anything too crazy...
+  #
+  local log=$1;
+  local num=${2:-5};
+  
+  if [[ -f "${log}" ]]; then # rotate logs
+    while [[ ${num} -gt 1 ]]; do
+      #shellcheck disable=SC2086
+      let prev=${num}-1
+      if [[ -f "${log}.${prev}" ]]; then
+        mv "${log}.${prev}" "${log}.${num}"
+      fi
+      num=${prev}
+    done
+    mv "${log}" "${log}.${num}"
+  fi
+}
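# A minimal usage sketch for hadoop_rotate_log above; the log path and the
# retention count are illustrative.  With the default of 5 generations:
#   hadoop.log.4 -> hadoop.log.5, ..., hadoop.log.1 -> hadoop.log.2, hadoop.log -> hadoop.log.1
hadoop_rotate_log "${HADOOP_LOG_DIR}/hadoop.log"      # keep the default 5 generations
hadoop_rotate_log "${HADOOP_LOG_DIR}/hadoop.log" 10   # or keep 10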
+
+function hadoop_actual_ssh
+{
+  # we are passing this function to xargs
+  # should get hostname followed by rest of command line
+  local slave=$1
+  shift
+  
+  # shellcheck disable=SC2086
+  ssh ${HADOOP_SSH_OPTS} ${slave} $"${@// /\\ }" 2>&1 | sed "s/^/$slave: /"
+}
+
+function hadoop_connect_to_hosts
+{
+  # shellcheck disable=SC2124
+  local params="$@"
+  
+  #
+  # ssh (or whatever) to a host
+  #
+  # User can specify hostnames or a file where the hostnames are (not both)
+  if [[ -n "${HADOOP_SLAVES}" && -n "${HADOOP_SLAVE_NAMES}" ]] ; then
+    hadoop_error "ERROR: Both HADOOP_SLAVES and HADOOP_SLAVE_NAME were defined. Aborting."
+    exit 1
+  fi
+  
+  if [[ -n "${HADOOP_SLAVE_NAMES}" ]] ; then
+    SLAVE_NAMES=${HADOOP_SLAVE_NAMES}
+  else
+    SLAVE_FILE=${HADOOP_SLAVES:-${HADOOP_CONF_DIR}/slaves}
+  fi
+  
+  # if pdsh is available, let's use it.  otherwise default
+  # to a loop around ssh.  (ugh)
+  if [[ -e '/usr/bin/pdsh' ]]; then
+    if [[ -z "${HADOOP_SLAVE_NAMES}" ]] ; then
+      # if we were given a file, just let pdsh deal with it.
+      # shellcheck disable=SC2086
+      PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
+      -f "${HADOOP_SSH_PARALLEL}" -w ^"${SLAVE_FILE}" $"${@// /\\ }" 2>&1
+    else
+      # no spaces allowed in the pdsh arg host list
+      # shellcheck disable=SC2086
+      SLAVE_NAMES=$(echo ${SLAVE_NAMES} | tr -s ' ' ,)
+      PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
+      -f "${HADOOP_SSH_PARALLEL}" -w "${SLAVE_NAMES}" $"${@// /\\ }" 2>&1
+    fi
+  else
+    if [[ -z "${SLAVE_NAMES}" ]]; then
+      SLAVE_NAMES=$(sed 's/#.*$//;/^$/d' "${SLAVE_FILE}")
+    fi
+    
+    # quoting here gets tricky. it's easier to push it into a function
+    # so that we don't have to deal with it. However...
+    # xargs can't use a function so instead we'll export it out
+    # and force it into a subshell
+    # moral of the story: just use pdsh.
+    export -f hadoop_actual_ssh
+    export HADOOP_SSH_OPTS
+    echo "${SLAVE_NAMES}" | \
+    xargs -n 1 -P"${HADOOP_SSH_PARALLEL}" \
+    -I {} bash -c --  "hadoop_actual_ssh {} ${params}"
+    wait
+  fi
+}
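
A sketch of the two input modes described above (host names are hypothetical, not part of the patch):

    # explicit host list; pdsh or the ssh/xargs fallback is picked automatically
    HADOOP_SLAVE_NAMES="node1 node2 node3" hadoop_connect_to_hosts hostname
    # or leave both variables unset to read ${HADOOP_CONF_DIR}/slaves
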
+
+function hadoop_add_param
+{
+  #
+  # general param dedupe..
+  # $1 is what we are adding to
+  # $2 is the name of what we want to add (key)
+  # $3 is the key+value of what we're adding
+  #
+  # doing it this way allows us to support all sorts of
+  # different syntaxes, just so long as they are space
+  # delimited
+  #
+  if [[ ! ${!1} =~ $2 ]] ; then
+    # shellcheck disable=SC2086
+    eval $1="'${!1} $3'"
+  fi
+}
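
A usage sketch, mirroring the hadoop_finalize_hadoop_opts calls later in this file (the native path is hypothetical):

    hadoop_add_param HADOOP_OPTS java.library.path "-Djava.library.path=/opt/hadoop/lib/native"
    # repeating the call with the same key is a no-op, so HADOOP_OPTS stays deduplicated
    hadoop_add_param HADOOP_OPTS java.library.path "-Djava.library.path=/opt/hadoop/lib/native"
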
+
+function hadoop_add_classpath
+{
+  # two params:
+  # $1 = directory, file, wildcard, whatever to add
+  # $2 = before or after, which determines where in the
+  #      classpath this object should go. default is after
+  # return 0 = success
+  # return 1 = failure (duplicate, doesn't exist, whatever)
+  
+  # However, with classpath (& JLP), we can do dedupe
+  # along with some sanity checking (e.g., missing directories)
+  # since we have a better idea of what is legal
+  #
+  # for wildcard at end, we can
+  # at least check the dir exists
+  if [[ $1 =~ ^.*\*$ ]]; then
+    local mp=$(dirname "$1")
+    if [[ ! -d "${mp}" ]]; then
+      return 1
+    fi
+    
+    # no wildcard in the middle, so check existence
+    # (doesn't matter *what* it is)
+  elif [[ ! $1 =~ ^.*\*.*$ ]] && [[ ! -e "$1" ]]; then
+    return 1
+  fi
+  
+  if [[ -z "${CLASSPATH}" ]]; then
+    CLASSPATH=$1
+  elif [[ ":${CLASSPATH}:" != *":$1:"* ]]; then
+    if [[ "$2" = "before" ]]; then
+      CLASSPATH="$1:${CLASSPATH}"
+    else
+      CLASSPATH+=:$1
+    fi
+  fi
+  return 0
+}
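
Both forms appear elsewhere in this file; a brief sketch:

    hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"'/*'   # append a jar wildcard
    hadoop_add_classpath "${HADOOP_CONF_DIR}" before                                 # prepend a directory
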
+
+function hadoop_add_colonpath
+{
+  # two params:
+  # $1 = directory, file, wildcard, whatever to add
+  # $2 = before or after, which determines where in the
+  #      classpath this object should go
+  # return 0 = success
+  # return 1 = failure (duplicate)
+  
+  # this is CLASSPATH, JLP, etc but with dedupe but no
+  # other checking
+  if [[ -d "${2}" ]] && [[ ":${!1}:" != *":$2:"* ]]; then
+    if [[ -z "${!1}" ]]; then
+      # shellcheck disable=SC2086
+      eval $1="'$2'"
+    elif [[ "$3" = "before" ]]; then
+      # shellcheck disable=SC2086
+      eval $1="'$2:${!1}'"
+    else
+      # shellcheck disable=SC2086
+      eval $1+="'$2'"
+    fi
+  fi
+}
+
+function hadoop_add_javalibpath
+{
+  # specialized function for a common use case
+  hadoop_add_colonpath JAVA_LIBRARY_PATH "$1" "$2"
+}
+
+function hadoop_add_ldlibpath
+{
+  # specialized function for a common use case
+  hadoop_add_colonpath LD_LIBRARY_PATH "$1" "$2"
+  
+  # note that we export this
+  export LD_LIBRARY_PATH
+}
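
A sketch of the colon-path helpers (the native-library directory is hypothetical):

    hadoop_add_ldlibpath "${HADOOP_COMMON_HOME}/lib/native"            # append, with dedupe
    hadoop_add_javalibpath "${HADOOP_COMMON_HOME}/lib/native" before   # prepend to java.library.path
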
+
+function hadoop_add_to_classpath_common
+{
+  
+  #
+  # get all of the common jars+config in the path
+  #
+  
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    hadoop_add_classpath "${HADOOP_COMMON_HOME}/hadoop-common/target/classes"
+  fi
+  
+  if [[ -d "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"
+  fi
+  
+  hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*'
+}
+
+function hadoop_add_to_classpath_hdfs
+{
+  #
+  # get all of the hdfs jars+config in the path
+  #
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    hadoop_add_classpath "${HADOOP_HDFS_HOME}/hadoop-hdfs/target/classes"
+  fi
+  
+  # put hdfs in classpath if present
+  if [[ -d "${HADOOP_HDFS_HOME}/${HDFS_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"
+  fi
+  
+  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"'/*'
+}
+
+function hadoop_add_to_classpath_yarn
+{
+  #
+  # get all of the yarn jars+config in the path
+  #
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    for i in yarn-api yarn-common yarn-mapreduce yarn-master-worker \
+    yarn-server/yarn-server-nodemanager \
+    yarn-server/yarn-server-common \
+    yarn-server/yarn-server-resourcemanager; do
+      hadoop_add_classpath "${HADOOP_YARN_HOME}/$i/target/classes"
+    done
+    
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/test/classes"
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/tools"
+  fi
+  
+  if [[ -d "${HADOOP_YARN_HOME}/${YARN_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}"
+  fi
+  
+  hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath  "${HADOOP_YARN_HOME}/${YARN_DIR}"'/*'
+}
+
+function hadoop_add_to_classpath_mapred
+{
+  #
+  # get all of the mapreduce jars+config in the path
+  #
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-shuffle/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-common/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs-plugins/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-app/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-jobclient/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-core/target/classes"
+  fi
+  
+  if [[ -d "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"
+  fi
+  
+  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"'/*'
+}
+
+
+function hadoop_add_to_classpath_userpath
+{
+  # Add the user-specified HADOOP_CLASSPATH to the
+  # official CLASSPATH env var.
+  # Add it first or last depending on if user has
+  # set env-var HADOOP_USER_CLASSPATH_FIRST
+  # we'll also dedupe it, because we're cool like that.
+  #
+  local c
+  local array
+  local i
+  local j
+  let c=0
+  
+  if [[ -n "${HADOOP_CLASSPATH}" ]]; then
+    # I wonder if Java runs on VMS.
+    for i in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do
+      array[$c]=$i
+      let c+=1
+    done
+    let j=c-1
+    
+    if [[ -z "${HADOOP_USER_CLASSPATH_FIRST}" ]]; then
+      for ((i=j; i>=0; i--)); do
+        hadoop_add_classpath "${array[$i]}" before
+      done
+    else
+      for ((i=0; i<=j; i++)); do
+        hadoop_add_classpath "${array[$i]}" after
+      done
+    fi
+  fi
+}
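
A sketch of the resulting behaviour, assuming hypothetical user jars:

    export HADOOP_CLASSPATH="/opt/jars/a.jar:/opt/jars/b.jar"
    # appended to CLASSPATH by default; uncomment the next line before the call
    # to prepend instead (relative order is preserved either way)
    # export HADOOP_USER_CLASSPATH_FIRST="yes"
    hadoop_add_to_classpath_userpath
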
+
+function hadoop_os_tricks
+{
+  local bindv6only
+
+  # some OSes have special needs. here's some out of the box
+  # examples for OS X and Linux. Vendors, replace this with your special sauce.
+  case ${HADOOP_OS_TYPE} in
+    Darwin)
+      if [[ -x /usr/libexec/java_home ]]; then
+        export JAVA_HOME="$(/usr/libexec/java_home)"
+      else
+        export JAVA_HOME=/Library/Java/Home
+      fi
+    ;;
+    Linux)
+      bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
+
+      # NOTE! HADOOP_ALLOW_IPV6 is a developer hook.  We leave it
+      # undocumented in hadoop-env.sh because we don't want users to
+      # shoot themselves in the foot while devs make IPv6 work.
+      if [[ -n "${bindv6only}" ]] && 
+         [[ "${bindv6only}" -eq "1" ]] && 
+         [[ "${HADOOP_ALLOW_IPV6}" != "yes" ]]; then
+        hadoop_error "ERROR: \"net.ipv6.bindv6only\" is set to 1 "
+        hadoop_error "ERROR: Hadoop networking could be broken. Aborting."
+        hadoop_error "ERROR: For more info: http://wiki.apache.org/hadoop/HadoopIPv6"
+        exit 1
+      fi
+      # Newer versions of glibc use an arena memory allocator that
+      # causes virtual memory usage to explode. This interacts badly
+      # with the many threads that we use in Hadoop. Tune the variable
+      # down to prevent vmem explosion.
+      export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+    ;;
+  esac
+}
+
+function hadoop_java_setup
+{
+  # Bail if we did not detect it
+  if [[ -z "${JAVA_HOME}" ]]; then
+    hadoop_error "ERROR: JAVA_HOME is not set and could not be found."
+    exit 1
+  fi
+  
+  if [[ ! -d "${JAVA_HOME}" ]]; then
+    hadoop_error "ERROR: JAVA_HOME ${JAVA_HOME} does not exist."
+    exit 1
+  fi
+  
+  JAVA="${JAVA_HOME}/bin/java"
+  
+  if [[ ! -x "$JAVA" ]]; then
+    hadoop_error "ERROR: $JAVA is not executable."
+    exit 1
+  fi
+  # shellcheck disable=SC2034
+  JAVA_HEAP_MAX=-Xmx1g
+  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
+  
+  # check envvars which might override default args
+  if [[ -n "$HADOOP_HEAPSIZE" ]]; then
+    # shellcheck disable=SC2034
+    JAVA_HEAP_MAX="-Xmx${HADOOP_HEAPSIZE}m"
+  fi
+}
+
+
+function hadoop_finalize_libpaths
+{
+  if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
+    hadoop_add_param HADOOP_OPTS java.library.path \
+    "-Djava.library.path=${JAVA_LIBRARY_PATH}"
+    export LD_LIBRARY_PATH
+  fi
+}
+
+#
+# fill in any last minute options that might not have been defined yet
+#
+# Note that we are replacing ' ' with '\ ' so that directories with
+# spaces work correctly when run exec blah
+#
+function hadoop_finalize_hadoop_opts
+{
+  hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR/ /\ }"
+  hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE/ /\ }"
+  hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_PREFIX/ /\ }"
+  hadoop_add_param HADOOP_OPTS hadoop.id.str "-Dhadoop.id.str=${HADOOP_IDENT_STRING/ /\ }"
+  hadoop_add_param HADOOP_OPTS hadoop.root.logger "-Dhadoop.root.logger=${HADOOP_ROOT_LOGGER}"
+  hadoop_add_param HADOOP_OPTS hadoop.policy.file "-Dhadoop.policy.file=${HADOOP_POLICYFILE/ /\ }"
+  hadoop_add_param HADOOP_OPTS hadoop.security.logger "-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER}"
+}
+
+function hadoop_finalize_classpath
+{
+  
+  # we want the HADOOP_CONF_DIR at the end
+  # according to oom, it gives a 2% perf boost
+  hadoop_add_classpath "${HADOOP_CONF_DIR}" after
+  
+  # user classpath gets added at the last minute. this allows
+  # override of CONF dirs and more
+  hadoop_add_to_classpath_userpath
+}
+
+function hadoop_finalize
+{
+  # user classpath gets added at the last minute. this allows
+  # override of CONF dirs and more
+  hadoop_finalize_classpath
+  hadoop_finalize_libpaths
+  hadoop_finalize_hadoop_opts
+}
+
+function hadoop_exit_with_usage
+{
+  # NOTE: This function is not user replaceable.
+
+  local exitcode=$1
+  if [[ -z $exitcode ]]; then
+    exitcode=1
+  fi
+  if declare -F hadoop_usage >/dev/null ; then
+    hadoop_usage
+  elif [[ -x /usr/bin/cowsay ]]; then
+    /usr/bin/cowsay -f elephant "Sorry, no help available."
+  else
+    hadoop_error "Sorry, no help available."
+  fi
+  exit $exitcode
+}
+
+function hadoop_verify_secure_prereq
+{
+  # if you are on an OS like Illumos that has functional roles
+  # and you are using pfexec, you'll probably want to change
+  # this.
+  
+  # ${EUID} comes from the shell itself!
+  if [[ "${EUID}" -ne 0 ]] || [[ -n "${HADOOP_SECURE_COMMAND}" ]]; then
+    hadoop_error "ERROR: You must be a privileged in order to run a secure serice."
+    return 1
+  else
+    return 0
+  fi
+}
+
+function hadoop_setup_secure_service
+{
+  # need a more complicated setup? replace me!
+  
+  HADOOP_PID_DIR=${HADOOP_SECURE_PID_DIR}
+  HADOOP_LOG_DIR=${HADOOP_SECURE_LOG_DIR}
+}
+
+function hadoop_verify_piddir
+{
+  if [[ -z "${HADOOP_PID_DIR}" ]]; then
+    hadoop_error "No pid directory defined."
+    exit 1
+  fi
+  if [[ ! -w "${HADOOP_PID_DIR}" ]] && [[ ! -d "${HADOOP_PID_DIR}" ]]; then
+    hadoop_error "WARNING: ${HADOOP_PID_DIR} does not exist. Creating."
+    mkdir -p "${HADOOP_PID_DIR}" > /dev/null 2>&1
+    if [[ $? -gt 0 ]]; then
+      hadoop_error "ERROR: Unable to create ${HADOOP_PID_DIR}. Aborting."
+      exit 1
+    fi
+  fi
+  touch "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR: Unable to write in ${HADOOP_PID_DIR}. Aborting."
+    exit 1
+  fi
+  rm "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
+}
+
+function hadoop_verify_logdir
+{
+  if [[ -z "${HADOOP_LOG_DIR}" ]]; then
+    hadoop_error "No log directory defined."
+    exit 1
+  fi
+  if [[ ! -w "${HADOOP_LOG_DIR}" ]] && [[ ! -d "${HADOOP_LOG_DIR}" ]]; then
+    hadoop_error "WARNING: ${HADOOP_LOG_DIR} does not exist. Creating."
+    mkdir -p "${HADOOP_LOG_DIR}" > /dev/null 2>&1
+    if [[ $? -gt 0 ]]; then
+      hadoop_error "ERROR: Unable to create ${HADOOP_LOG_DIR}. Aborting."
+      exit 1
+    fi
+  fi
+  touch "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR: Unable to write in ${HADOOP_LOG_DIR}. Aborting."
+    exit 1
+  fi
+  rm "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
+}
+
+function hadoop_status_daemon() {
+  #
+  # LSB 4.1.0 compatible status command (1)
+  #
+  # 0 = program is running
+  # 1 = dead, but still a pid (2)
+  # 2 = (not used by us)
+  # 3 = not running
+  #
+  # 1 - this is not an endorsement of the LSB
+  #
+  # 2 - technically, the specification says /var/run/pid, so
+  #     we should never return this value, but we're giving
+  #     them the benefit of a doubt and returning 1 even if
+  #     our pid is not in in /var/run .
+  #
+  
+  local pidfile=$1
+  shift
+  
+  local pid
+  
+  if [[ -f "${pidfile}" ]]; then
+    pid=$(cat "${pidfile}")
+    if ps -p "${pid}" > /dev/null 2>&1; then
+      return 0
+    fi
+    return 1
+  fi
+  return 3
+}
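
A sketch of consuming the LSB-style return codes (the pid file name is hypothetical):

    hadoop_status_daemon "${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid"
    case $? in
      0) echo "running" ;;
      1) echo "dead, but a pid file remains" ;;
      3) echo "not running" ;;
    esac
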
+
+function hadoop_java_exec
+{
+  # run a java command.  this is used for
+  # non-daemons
+
+  local command=$1
+  local class=$2
+  shift 2
+  # we eval this so that paths with spaces work
+  #shellcheck disable=SC2086
+  eval exec "$JAVA" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
+
+}
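
For reference, the rewritten rcc script later in this commit finishes with exactly this pattern:

    hadoop_finalize
    export CLASSPATH
    hadoop_java_exec rcc "${CLASS}" "$@"
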
+
+function hadoop_start_daemon
+{
+  # this is our non-privileged daemon starter
+  # that fires up a daemon in the *foreground*
+  # so complex! so wow! much java!
+  local command=$1
+  local class=$2
+  shift 2
+  #shellcheck disable=SC2086
+  eval exec "$JAVA" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
+}
+
+function hadoop_start_daemon_wrapper
+{
+  # this is our non-privileged daemon start
+  # that fires up a daemon in the *background*
+  local daemonname=$1
+  local class=$2
+  local pidfile=$3
+  local outfile=$4
+  shift 4
+  
+  hadoop_rotate_log "${outfile}"
+  
+  hadoop_start_daemon "${daemonname}" \
+  "$class" "$@" >> "${outfile}" 2>&1 < /dev/null &
+  #shellcheck disable=SC2086
+  echo $! > "${pidfile}" 2>/dev/null
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR:  Cannot write pid ${pidfile}."
+  fi
+  
+  # shellcheck disable=SC2086
+  renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR: Cannot set priority of process $!"
+  fi
+  
+  # shellcheck disable=SC2086
+  disown $! 2>&1
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR: Cannot disconnect process $!"
+  fi
+  sleep 1
+  
+  # capture the ulimit output
+  ulimit -a >> "${outfile}" 2>&1
+  
+  # shellcheck disable=SC2086
+  if ! ps -p $! >/dev/null 2>&1; then
+    return 1
+  fi
+  return 0
+}
+
+function hadoop_start_secure_daemon
+{
+  # this is used to launch a secure daemon in the *foreground*
+  #
+  local daemonname=$1
+  local class=$2
+  
+  # pid file to create for our deamon
+  local daemonpidfile=$3
+  
+  # where to send stdout. jsvc has bad habits so this *may* be &1
+  # which means you send it to stdout!
+  local daemonoutfile=$4
+  
+  # where to send stderr.  same thing, except &2 = stderr
+  local daemonerrfile=$5
+  shift 5
+  
+  
+  
+  hadoop_rotate_log "${daemonoutfile}"
+  hadoop_rotate_log "${daemonerrfile}"
+  
+  jsvc="${JSVC_HOME}/jsvc"
+  if [[ ! -f "${jsvc}" ]]; then
+    hadoop_error "JSVC_HOME is not set or set incorrectly. jsvc is required to run secure"
+    hadoop_error "or privileged daemons. Please download and install jsvc from "
+    hadoop_error "http://archive.apache.org/dist/commons/daemon/binaries/ "
+    hadoop_error "and set JSVC_HOME to the directory containing the jsvc binary."
+    exit 1
+  fi
+  
+  # note that shellcheck will throw a
+  # bogus for-our-use-case 2086 here.
+  # it doesn't properly support multi-line situations
+  
+  exec "${jsvc}" \
+  "-Dproc_${daemonname}" \
+  -outfile "${daemonoutfile}" \
+  -errfile "${daemonerrfile}" \
+  -pidfile "${daemonpidfile}" \
+  -nodetach \
+  -user "${HADOOP_SECURE_USER}" \
+  -cp "${CLASSPATH}" \
+  ${HADOOP_OPTS} \
+  "${class}" "$@"
+}
+
+function hadoop_start_secure_daemon_wrapper
+{
+  # this wraps hadoop_start_secure_daemon to take care
+  # of the dirty work to launch a daemon in the background!
+  local daemonname=$1
+  local class=$2
+  
+  # same rules as hadoop_start_secure_daemon except we
+  # have some additional parameters
+  
+  local daemonpidfile=$3
+  
+  local daemonoutfile=$4
+  
+  # the pid file of the subprocess that spawned our
+  # secure launcher
+  local jsvcpidfile=$5
+  
+  # the output of the subprocess that spawned our secure
+  # launcher
+  local jsvcoutfile=$6
+  
+  local daemonerrfile=$7
+  shift 7
+  
+  hadoop_rotate_log "${jsvcoutfile}"
+  
+  hadoop_start_secure_daemon \
+  "${daemonname}" \
+  "${class}" \
+  "${daemonpidfile}" \
+  "${daemonoutfile}" \
+  "${daemonerrfile}" "$@" >> "${jsvcoutfile}" 2>&1 < /dev/null &
+  
+  # This wrapper should only have one child.  Unlike Shawty Lo.
+  #shellcheck disable=SC2086
+  echo $! > "${jsvcpidfile}" 2>/dev/null
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR:  Cannot write pid ${pidfile}."
+  fi
+  sleep 1
+  #shellcheck disable=SC2086
+  renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR: Cannot set priority of process $!"
+  fi
+  if [[ -f "${daemonpidfile}" ]]; then
+    #shellcheck disable=SC2046
+    renice "${HADOOP_NICENESS}" $(cat "${daemonpidfile}") >/dev/null 2>&1
+    if [[ $? -gt 0 ]]; then
+      hadoop_error "ERROR: Cannot set priority of process $(cat "${daemonpidfile}")"
+    fi
+  fi
+  #shellcheck disable=SC2086
+  disown $! 2>&1
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR: Cannot disconnect process $!"
+  fi
+  # capture the ulimit output
+  su "${HADOOP_SECURE_USER}" -c 'bash -c "ulimit -a"' >> "${jsvcoutfile}" 2>&1
+  #shellcheck disable=SC2086
+  if ! ps -p $! >/dev/null 2>&1; then
+    return 1
+  fi
+  return 0
+}
+
+function hadoop_stop_daemon
+{
+  local cmd=$1
+  local pidfile=$2
+  shift 2
+  
+  local pid
+  
+  if [[ -f "${pidfile}" ]]; then
+    pid=$(cat "$pidfile")
+    
+    kill "${pid}" >/dev/null 2>&1
+    sleep "${HADOOP_STOP_TIMEOUT}"
+    if kill -0 "${pid}" > /dev/null 2>&1; then
+      hadoop_error "WARNING: ${cmd} did not stop gracefully after ${HADOOP_STOP_TIMEOUT} seconds: Trying to kill with kill -9"
+      kill -9 "${pid}" >/dev/null 2>&1
+    fi
+    if ps -p "${pid}" > /dev/null 2>&1; then
+      hadoop_error "ERROR: Unable to kill ${pid}"
+    else
+      rm -f "${pidfile}" >/dev/null 2>&1
+    fi
+  fi
+}
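
A usage sketch (daemon name and pid file are hypothetical):

    hadoop_stop_daemon namenode "${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-namenode.pid"
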
+
+
+function hadoop_stop_secure_daemon
+{
+  local command=$1
+  local daemonpidfile=$2
+  local privpidfile=$3
+  shift 3
+  local ret
+  
+  hadoop_stop_daemon "${command}" "${daemonpidfile}"
+  ret=$?
+  rm -f "${daemonpidfile}" "${privpidfile}" 2>/dev/null
+  return ${ret}
+}
+
+function hadoop_daemon_handler
+{
+  local daemonmode=$1
+  local daemonname=$2
+  local class=$3
+  local daemon_pidfile=$4
+  local daemon_outfile=$5
+  shift 5
+  
+  case ${daemonmode} in
+    status)
+      hadoop_status_daemon "${daemon_pidfile}"
+      exit $?
+    ;;
+    
+    stop)
+      hadoop_stop_daemon "${daemonname}" "${daemon_pidfile}"
+      exit $?
+    ;;
+    
+    ##COMPAT  -- older hadoops would also start daemons by default
+    start|default)
+      hadoop_verify_piddir
+      hadoop_verify_logdir
+      hadoop_status_daemon "${daemon_pidfile}"
+      if [[ $? == 0  ]]; then
+        hadoop_error "${daemonname} running as process $(cat "${daemon_pidfile}").  Stop it first."
+        exit 1
+      else
+        # stale pid file, so just remove it and continue on
+        rm -f "${daemon_pidfile}" >/dev/null 2>&1
+      fi
+      ##COMPAT  - differentiate between --daemon start and nothing
+      # "nothing" shouldn't detach
+      if [[ "$daemonmode" = "default" ]]; then
+        hadoop_start_daemon "${daemonname}" "${class}" "$@"
+      else
+        hadoop_start_daemon_wrapper "${daemonname}" \
+        "${class}" "${daemon_pidfile}" "${daemon_outfile}" "$@"
+      fi
+    ;;
+  esac
+}
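
A sketch of how a daemon launcher might dispatch through the handler; the mode variable, class, and file names below are illustrative, not part of the patch:

    daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-namenode.pid"
    daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-namenode.out"
    hadoop_daemon_handler "${daemonmode:-default}" namenode \
      org.apache.hadoop.hdfs.server.namenode.NameNode \
      "${daemon_pidfile}" "${daemon_outfile}" "$@"
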
+
+
+function hadoop_secure_daemon_handler
+{
+  local daemonmode=$1
+  local daemonname=$2
+  local classname=$3
+  local daemon_pidfile=$4
+  local daemon_outfile=$5
+  local priv_pidfile=$6
+  local priv_outfile=$7
+  local priv_errfile=$8
+  shift 8
+  
+  case ${daemonmode} in
+    status)
+      hadoop_status_daemon "${daemon_pidfile}"
+      exit $?
+    ;;
+    
+    stop)
+      hadoop_stop_secure_daemon "${daemonname}" \
+      "${daemon_pidfile}" "${priv_pidfile}"
+      exit $?
+    ;;
+    
+    ##COMPAT  -- older hadoops would also start daemons by default
+    start|default)
+      hadoop_verify_piddir
+      hadoop_verify_logdir
+      hadoop_status_daemon "${daemon_pidfile}"
+      if [[ $? == 0  ]]; then
+        hadoop_error "${daemonname} running as process $(cat "${daemon_pidfile}").  Stop it first."
+        exit 1
+      else
+        # stale pid file, so just remove it and continue on
+        rm -f "${daemon_pidfile}" >/dev/null 2>&1
+      fi
+      
+      ##COMPAT  - differentiate between --daemon start and nothing
+      # "nothing" shouldn't detach
+      if [[ "${daemonmode}" = "default" ]]; then
+        hadoop_start_secure_daemon "${daemonname}" "${classname}" \
+        "${daemon_pidfile}" "${daemon_outfile}" \
+        "${priv_errfile}"  "$@"
+      else
+        hadoop_start_secure_daemon_wrapper "${daemonname}" "${classname}" \
+        "${daemon_pidfile}" "${daemon_outfile}" \
+        "${priv_pidfile}" "${priv_outfile}" "${priv_errfile}"  "$@"
+      fi
+    ;;
+  esac
+}
+

+ 93 - 0
hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example

@@ -0,0 +1,93 @@
+# Copyright 2014 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##
+## VENDORS!
+##
+## This is where you can redefine the layout of Hadoop directories
+## and expect to be reasonably compatible.  Needless to say, this
+## is expert level stuff and one needs to tread carefully.
+##
+## If you move HADOOP_LIBEXEC_DIR from some location that
+## isn't bin/../libexec, you MUST define either HADOOP_LIBEXEC_DIR
+## or have HADOOP_PREFIX/libexec/hadoop-config.sh and
+## HADOOP_PREFIX/libexec/hadoop-layout.sh (this file) exist.
+
+## NOTE:
+##
+## hadoop-functions.sh gets executed BEFORE this file.  So you can
+##   redefine all of those functions here.
+##
+## *-env.sh get executed AFTER this file but generally too late to
+## override the settings (but not the functions!) here.  However, this
+## also means you cannot use things like HADOOP_CONF_DIR for these
+## definitions.
+
+####
+# Common disk layout
+####
+
+# Default location for the common/core Hadoop project
+# export HADOOP_COMMON_HOME=$HADOOP_PREFIX
+
+# Relative locations where components under HADOOP_COMMON_HOME are located
+# export HADOOP_COMMON_DIR="share/hadoop/common"
+# export HADOOP_COMMON_LIB_JARS_DIR="share/hadoop/common/lib"
+# export HADOOP_COMMON_LIB_NATIVE_DIR="lib/native"
+
+####
+# HDFS disk layout
+####
+
+# Default location for the HDFS subproject
+# export HADOOP_HDFS_HOME=$HADOOP_PREFIX
+
+# Relative locations where components under HADOOP_HDFS_HOME are located
+# export HDFS_DIR="share/hadoop/hdfs"
+# export HDFS_LIB_JARS_DIR="share/hadoop/hdfs/lib"
+
+####
+# YARN disk layout
+####
+
+# Default location for the YARN subproject
+# export HADOOP_YARN_HOME=$HADOOP_PREFIX
+
+# Relative locations where components under HADOOP_YARN_HOME are located
+# export YARN_DIR="share/hadoop/yarn"
+# export YARN_LIB_JARS_DIR="share/hadoop/yarn/lib"
+
+# Default location for the MapReduce subproject
+# export HADOOP_MAPRED_HOME=$HADOOP_PREFIX
+
+####
+# MapReduce disk layout
+####
+
+# Relative locations where components under HADOOP_MAPRED_HOME are located
+# export MAPRED_DIR="share/hadoop/mapreduce"
+# export MAPRED_LIB_JARS_DIR="share/hadoop/mapreduce/lib"
+
+####
+# Misc paths
+####
+
+# setup a default TOOL_PATH, where things like distcp lives
+# note that this path only gets added for certain commands and not
+# part of the general classpath
+# export TOOL_PATH="$HADOOP_PREFIX/share/hadoop/tools/lib/*"

+ 10 - 3
hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd

@@ -115,11 +115,14 @@ call :updatepath %HADOOP_BIN_PATH%
   )
 
   if %hadoop-command% == classpath (
-    @echo %CLASSPATH%
-    goto :eof
+    if not defined hadoop-command-arguments (
+      @rem No need to bother starting up a JVM for this simple case.
+      @echo %CLASSPATH%
+      exit /b
+    )
   )
   
-  set corecommands=fs version jar checknative distcp daemonlog archive
+  set corecommands=fs version jar checknative distcp daemonlog archive classpath
   for %%i in ( %corecommands% ) do (
     if %hadoop-command% == %%i set corecommand=true  
   )
@@ -175,6 +178,10 @@ call :updatepath %HADOOP_BIN_PATH%
   set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
   goto :eof
 
+:classpath
+  set CLASS=org.apache.hadoop.util.Classpath
+  goto :eof
+
 :updatepath
   set path_to_add=%*
   set current_path_comparable=%path%

+ 16 - 35
hadoop-common-project/hadoop-common/src/main/bin/rcc

@@ -15,47 +15,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-# The Hadoop record compiler
-#
-# Environment Variables
-#
-#   JAVA_HOME        The java implementation to use.  Overrides JAVA_HOME.
-#
-#   HADOOP_OPTS      Extra Java runtime options.
-#
-#   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
-#
-
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+# This script runs the hadoop core commands.
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "$this")" >/dev/null && pwd -P)
+script="$(basename -- "$this")"
+this="$bin/$script"
 
 DEFAULT_LIBEXEC_DIR="$bin"/../libexec
 HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
-
-if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
-  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-fi
+HADOOP_NEW_CONFIG=true
+. "$HADOOP_LIBEXEC_DIR/hadoop-config.sh"
 
-# some Java parameters
-if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
-  JAVA_HOME=$JAVA_HOME
-fi
-  
-if [ "$JAVA_HOME" = "" ]; then
-  echo "Error: JAVA_HOME is not set."
-  exit 1
+if [ $# = 0 ]; then
+  hadoop_exit_with_usage 1
 fi
 
-JAVA=$JAVA_HOME/bin/java
-JAVA_HEAP_MAX=-Xmx1000m 
+CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
 
-# restore ordinary behaviour
-unset IFS
+# Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
 
-CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
+hadoop_add_param HADOOP_OPTS Xmx "$JAVA_HEAP_MAX"
 
-# run it
-exec "$JAVA" $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"
+hadoop_finalize
+export CLASSPATH
+hadoop_java_exec rcc "${CLASS}" "$@"

+ 23 - 28
hadoop-common-project/hadoop-common/src/main/bin/slaves.sh

@@ -27,38 +27,33 @@
 #   HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
 ##
 
-usage="Usage: slaves.sh [--config confdir] command..."
+function hadoop_usage {
+  echo "Usage: slaves.sh [--config confdir] command..."
+}
 
-# if no args specified, show usage
-if [ $# -le 0 ]; then
-  echo $usage
-  exit 1
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
 fi
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
-
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
-
-
-# Where to start the script, see hadoop-config.sh
-# (it set up the variables based on command line options)
-if [ "$HADOOP_SLAVE_NAMES" != '' ] ; then
-  SLAVE_NAMES=$HADOOP_SLAVE_NAMES
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
 else
-  SLAVE_FILE=${HADOOP_SLAVES:-${HADOOP_CONF_DIR}/slaves}
-  SLAVE_NAMES=$(cat "$SLAVE_FILE" | sed  's/#.*$//;/^$/d')
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
+  exit 1
+fi
+
+# if no args specified, show usage
+if [[ $# -le 0 ]]; then
+  hadoop_exit_with_usage 1
 fi
 
-# start the daemons
-for slave in $SLAVE_NAMES ; do
- ssh $HADOOP_SSH_OPTS $slave $"${@// /\\ }" \
-   2>&1 | sed "s/^/$slave: /" &
- if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then
-   sleep $HADOOP_SLAVE_SLEEP
- fi
-done
+hadoop_connect_to_hosts "$@"
 
-wait

+ 26 - 12
hadoop-common-project/hadoop-common/src/main/bin/start-all.sh

@@ -15,24 +15,38 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+echo "This script is deprecated. Use start-dfs.sh and start-yarn.sh instead."
+exit 1
 
-# Start all hadoop daemons.  Run this on master node.
 
-echo "This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh"
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
-
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
 
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
+  exit 1
+fi
 # start hdfs daemons if hdfs is present
-if [ -f "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh ]; then
-  "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh --config $HADOOP_CONF_DIR
+if [[ -f "${HADOOP_HDFS_HOME}/sbin/start-dfs.sh" ]]; then
+  "${HADOOP_HDFS_HOME}/sbin/start-dfs.sh" --config "${HADOOP_CONF_DIR}"
 fi
 
 # start yarn daemons if yarn is present
-if [ -f "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh ]; then
-  "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
+if [[ -f "${HADOOP_YARN_HOME}/sbin/start-yarn.sh" ]]; then
+  "${HADOOP_YARN_HOME}/sbin/start-yarn.sh" --config "${HADOOP_CONF_DIR}"
 fi
+
+
+

+ 25 - 11
hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh

@@ -18,21 +18,35 @@
 
 # Stop all hadoop daemons.  Run this on master node.
 
-echo "This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh"
-
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+echo "This script is deprecated. Use stop-dfs.sh and stop-yarn.sh instead."
+exit 1
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
+  exit 1
+fi
 
 # stop hdfs daemons if hdfs is present
-if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh ]; then
-  "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh --config $HADOOP_CONF_DIR
+if [[ -f "${HADOOP_HDFS_HOME}/sbin/stop-dfs.sh" ]]; then
+  "${HADOOP_HDFS_HOME}/sbin/stop-dfs.sh" --config "${HADOOP_CONF_DIR}"
 fi
 
 # stop yarn daemons if yarn is present
-if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh ]; then
-  "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh --config $HADOOP_CONF_DIR
+if [[ -f "${HADOOP_HDFS_HOME}/sbin/stop-yarn.sh" ]]; then
+  "${HADOOP_HDFS_HOME}/sbin/stop-yarn.sh" --config "${HADOOP_CONF_DIR}"
 fi
+

+ 372 - 51
hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh

@@ -1,5 +1,4 @@
-# Copyright 2011 The Apache Software Foundation
-# 
+#
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -18,71 +17,393 @@
 
 # Set Hadoop-specific environment variables here.
 
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
+##
+## THIS FILE ACTS AS THE MASTER FILE FOR ALL HADOOP PROJECTS.
+## SETTINGS HERE WILL BE READ BY ALL HADOOP COMMANDS.  THEREFORE,
+## ONE CAN USE THIS FILE TO SET YARN, HDFS, AND MAPREDUCE
+## CONFIGURATION OPTIONS INSTEAD OF xxx-env.sh.
+##
+## Precedence rules:
+##
+## {yarn-env.sh|hdfs-env.sh} > hadoop-env.sh > hard-coded defaults
+##
+## {YARN_xyz|HDFS_xyz} > HADOOP_xyz > hard-coded defaults
+##
+
+# Many of the options here are built from the perspective that users
+# may want to provide OVERWRITING values on the command line.
+# For example:
+#
+#  JAVA_HOME=/usr/java/testing hdfs dfs -ls
+#
+# Therefore, the vast majority (BUT NOT ALL!) of these defaults
+# are configured for substitution and not append.  If you would
+# like append, you'll need to modify this file accordingly.
+
+###
+# Generic settings for HADOOP
+###
+
+# Technically, the only required environment variable is JAVA_HOME.
+# All others are optional.  However, our defaults are probably not
+# your defaults.  Many sites configure these options outside of Hadoop,
+# such as in /etc/profile.d
 
 # The java implementation to use.
-export JAVA_HOME=${JAVA_HOME}
+export JAVA_HOME=${JAVA_HOME:-"hadoop-env.sh is not configured"}
 
-# The jsvc implementation to use. Jsvc is required to run secure datanodes.
-#export JSVC_HOME=${JSVC_HOME}
+# Location of Hadoop's configuration information.  i.e., where this
+# file is probably living.  You will almost certainly want to set
+# this in /etc/profile.d or equivalent.
+# export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop
 
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+# The maximum amount of heap to use, in MB. Default is 1024.
+# export HADOOP_HEAPSIZE=1024
 
-# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
-for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
-  if [ "$HADOOP_CLASSPATH" ]; then
-    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
-  else
-    export HADOOP_CLASSPATH=$f
-  fi
-done
+# Extra Java runtime options for all Hadoop commands. We don't support
+# IPv6 yet/still, so by default we set preference to IPv4.
+# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
 
-# The maximum amount of heap to use, in MB. Default is 1000.
-#export HADOOP_HEAPSIZE=
-#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+# Some parts of the shell code may do special things dependent upon
+# the operating system.  We have to set this here. See the next
+# section as to why....
+export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
 
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
 
-MAC_OSX=false
-case "`uname`" in
-Darwin*) MAC_OSX=true;;
+# Under certain conditions, Java on OS X will throw SCDynamicStore errors
+# in the system logs.
+# See HADOOP-8719 for more information.  If you need Kerberos
+# support on OS X, you'll want to change/remove this extra bit.
+case ${HADOOP_OS_TYPE} in
+  Darwin*)
+    export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.realm= "
+    export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.kdc= "
+    export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.conf= "
+  ;;
 esac
-if $MAC_OSX; then
-    export HADOOP_OPTS="$HADOOP_OPTS -Djava.security.krb5.realm= -Djava.security.krb5.kdc="
-fi
 
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
-export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+# Extra Java runtime options for Hadoop clients (i.e., hdfs dfs -blah)
+# These get added to HADOOP_OPTS for such commands.  In most cases,
+# this should be left empty and let users supply it on the
+# command line.
+# export HADOOP_CLIENT_OPTS=""
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+#
+# A note about classpaths.
+#
+# The classpath is configured such that entries are stripped prior
+# to handing to Java based either upon duplication or non-existence.
+# Wildcards and/or directories are *NOT* expanded as the
+# de-duplication is fairly simple.  So if two directories are in
+# the classpath that both contain awesome-methods-1.0.jar,
+# awesome-methods-1.0.jar will still be seen by java.  But if
+# the classpath specifically has awesome-methods-1.0.jar from the
+# same directory listed twice, the last one will be removed.
+#
 
-export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
-export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+# An additional, custom CLASSPATH.  This is really meant for
+# end users, but as an administrator, one might want to push
+# something extra in here too, such as the jar for the topology
+# method.  Just be sure to append to the existing HADOOP_CLASSPATH
+# so end users have a way to add stuff.
+# export HADOOP_CLASSPATH="/some/cool/path/on/your/machine"
 
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
-#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+# Should HADOOP_CLASSPATH be first in the official CLASSPATH?
+# export HADOOP_USER_CLASSPATH_FIRST="yes"
 
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+###
+# Options for remote shell connectivity
+###
 
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+# There are some optional components of hadoop that allow for
+# command and control of remote hosts.  For example,
+# start-dfs.sh will attempt to bring up all NNs, DNS, etc.
 
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+# Options to pass to SSH when one of the "log into a host and
+# start/stop daemons" scripts is executed
+# export HADOOP_SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"
 
-# The directory where pid files are stored. /tmp by default.
-# NOTE: this should be set to a directory that can only be written to by 
-#       the user that will run the hadoop daemons.  Otherwise there is the
-#       potential for a symlink attack.
-export HADOOP_PID_DIR=${HADOOP_PID_DIR}
-export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+# The built-in ssh handler will limit itself to 10 simultaneous connections.
+# For pdsh users, this sets the fanout size ( -f )
+# Change this to increase/decrease as necessary.
+# export HADOOP_SSH_PARALLEL=10
+
+# Filename which contains all of the hosts for any remote execution
+# helper scripts such as slaves.sh, start-dfs.sh, etc.
+# export HADOOP_SLAVES="${HADOOP_CONF_DIR}/slaves"
+
+###
+# Options for all daemons
+###
+#
+
+#
+# You can define variables right here and then re-use them later on.
+# For example, it is common to use the same garbage collection settings
+# for all the daemons.  So we could define:
+#
+# export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
+#
+# .. and then use it as per option (b) under the NameNode section below.
+
+# Where (primarily) daemon log files are stored.
+# $HADOOP_PREFIX/logs by default.
+# export HADOOP_LOG_DIR=${HADOOP_PREFIX}/logs
 
 # A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
+# This is used in writing log and pid files, so keep that in mind!
+# export HADOOP_IDENT_STRING=$USER
+
+# How many seconds to pause after stopping a daemon
+# export HADOOP_STOP_TIMEOUT=5
+
+# Where pid files are stored.  /tmp by default.
+# export HADOOP_PID_DIR=/tmp
+
+# Default log level and output location
+# This sets the hadoop.root.logger property
+# export HADOOP_ROOT_LOGGER=INFO,console
+
+# Default log level for daemons spawned explicitly by hadoop-daemon.sh
+# This sets the hadoop.root.logger property
+# export HADOOP_DAEMON_ROOT_LOGGER=INFO,RFA
+
+# Default log level and output location for security-related messages.
+# It sets -Dhadoop.security.logger on the command line.
+# You will almost certainly want to change this on a per-daemon basis!
+# export HADOOP_SECURITY_LOGGER=INFO,NullAppender
+
+# Default log level for file system audit messages.
+# It sets -Dhdfs.audit.logger on the command line.
+# You will almost certainly want to change this on a per-daemon basis!
+# export HADOOP_AUDIT_LOGGER=INFO,NullAppender
+
+# Default process priority level
+# Note that sub-processes will also run at this level!
+# export HADOOP_NICENESS=0
+
+# Default name for the service level authorization file
+# export HADOOP_POLICYFILE="hadoop-policy.xml"
+
+###
+# Secure/privileged execution
+###
+
+#
+# Out of the box, Hadoop uses jsvc from Apache Commons to launch daemons
+# on privileged ports.  This functionality can be replaced by providing
+# custom functions.  See hadoop-functions.sh for more information.
+#
+
+# The jsvc implementation to use. Jsvc is required to run secure datanodes.
+# export JSVC_HOME=/usr/bin
+
+#
+# This directory contains pids for secure and privileged processes.
+#export HADOOP_SECURE_PID_DIR=${HADOOP_PID_DIR}
+
+#
+# This directory contains the logs for secure and privileged processes.
+# export HADOOP_SECURE_LOG_DIR=${HADOOP_LOG_DIR}
+
+#
+# When running a secure daemon, the default value of HADOOP_IDENT_STRING
+# ends up being a bit bogus.  Therefore, by default, the code will
+# replace HADOOP_IDENT_STRING with HADOOP_SECURE_xx_USER.  If you want
+# to keep HADOOP_IDENT_STRING untouched, then uncomment this line.
+# export HADOOP_SECURE_IDENT_PRESERVE="true"
+
+###
+# NameNode specific parameters
+###
+# Specify the JVM options to be used when starting the NameNode.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# a) Set JMX options
+# export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
+#
+# b) Set garbage collection logs
+# export HADOOP_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
+#
+# c) ... or set them directly
+# export HADOOP_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
+
+# this is the default:
+# export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS -Dhdfs.audit.logger=INFO,NullAppender"
+
+###
+# SecondaryNameNode specific parameters
+###
+# Specify the JVM options to be used when starting the SecondaryNameNode.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# This is the default:
+# export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS -Dhdfs.audit.logger=INFO,NullAppender"
+
+###
+# DataNode specific parameters
+###
+# Specify the JVM options to be used when starting the DataNode.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# This is the default:
+# export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+# This **MUST** be uncommented to enable secure HDFS!
+# export HADOOP_SECURE_DN_USER=hdfs
+
+# Supplemental options for secure datanodes
+# By default, we use jsvc which needs to know to launch a
+# server jvm.
+# export HADOOP_DN_SECURE_EXTRA_OPTS="-jvm server"
+
+# Where datanode log files are stored in the secure data environment.
+# export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_SECURE_LOG_DIR}
+
+# Where datanode pid files are stored in the secure data environment.
+# export HADOOP_SECURE_DN_PID_DIR=${HADOOP_SECURE_PID_DIR}
+
+###
+# NFS3 Gateway specific parameters
+###
+# Specify the JVM options to be used when starting the NFS3 Gateway.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_NFS3_OPTS=""
+
+# Specify the JVM options to be used when starting the Hadoop portmapper.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_PORTMAP_OPTS="-Xmx512m"
+
+# Supplemental options for privileged gateways
+# By default, we use jsvc which needs to know to launch a
+# server jvm.
+# export HADOOP_NFS3_SECURE_EXTRA_OPTS="-jvm server"
+
+# On privileged gateways, user to run the gateway as after dropping privileges
+# export HADOOP_PRIVILEGED_NFS_USER=nfsserver
+
+###
+# ZKFailoverController specific parameters
+###
+# Specify the JVM options to be used when starting the ZKFailoverController.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_ZKFC_OPTS=""
+
+###
+# QuorumJournalNode specific parameters
+###
+# Specify the JVM options to be used when starting the QuorumJournalNode.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_JOURNALNODE_OPTS=""
+
+###
+# HDFS Balancer specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Balancer.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_BALANCER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+#
+# When building Hadoop, you can add the class paths to your commands
+# via this special env var:
+# HADOOP_ENABLE_BUILD_PATHS="true"
+
+# You can do things like replace parts of the shell underbelly.
+# Most of this code is in hadoop-functions.sh.
+#
+#
+# For example, if you want to add compression to the rotation
+# menthod for the .out files that daemons generate, you can do
+# that by redefining the hadoop_rotate_log function by
+# uncommenting this code block:
+
+#function hadoop_rotate_log
+#{
+#  #
+#  # log rotation (mainly used for .out files)
+#  # Users are likely to replace this one for something
+#  # that gzips or uses dates or who knows what.
+#  #
+#  # be aware that &1 and &2 might go through here
+#  # so don't do anything too crazy...
+#  #
+#  local log=$1;
+#  local num=${2:-5};
+#
+#  if [[ -f "${log}" ]]; then # rotate logs
+#    while [[ ${num} -gt 1 ]]; do
+#      #shellcheck disable=SC2086
+#      let prev=${num}-1
+#      if [[ -f "${log}.${prev}" ]]; then
+#        mv "${log}.${prev}" "${log}.${num}"
+#      fi
+#      num=${prev}
+#    done
+#    mv "${log}" "${log}.${num}"
+#    gzip -9 "${log}.${num}"
+#  fi
+#}
+#
+#
+# Another example:  finding java
+#
+# By default, Hadoop assumes that $JAVA_HOME is always defined
+# outside of its configuration. Eons ago, Apple standardized
+# on a helper program called java_home to find it for you.
+#
+#function hadoop_java_setup
+#{
+#
+#  if [[ -z "${JAVA_HOME}" ]]; then
+#     case $HADOOP_OS_TYPE in
+#       Darwin*)
+#          JAVA_HOME=$(/usr/libexec/java_home)
+#          ;;
+#     esac
+#  fi
+#
+#  # Bail if we did not detect it
+#  if [[ -z "${JAVA_HOME}" ]]; then
+#    echo "ERROR: JAVA_HOME is not set and could not be found." 1>&2
+#    exit 1
+#  fi
+#
+#  if [[ ! -d "${JAVA_HOME}" ]]; then
+#     echo "ERROR: JAVA_HOME (${JAVA_HOME}) does not exist." 1>&2
+#     exit 1
+#  fi
+#
+#  JAVA="${JAVA_HOME}/bin/java"
+#
+#  if [[ ! -x ${JAVA} ]]; then
+#    echo "ERROR: ${JAVA} is not executable." 1>&2
+#    exit 1
+#  fi
+#  JAVA_HEAP_MAX=-Xmx1g
+#  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-128}
+#
+#  # check envvars which might override default args
+#  if [[ -n "$HADOOP_HEAPSIZE" ]]; then
+#    JAVA_HEAP_MAX="-Xmx${HADOOP_HEAPSIZE}m"
+#  fi
+#}
+
+

+ 16 - 14
hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties

@@ -12,19 +12,22 @@
 
 #datanode.sink.file.filename=datanode-metrics.out
 
-# the following example split metrics of different
-# context to different sinks (in this case files)
-#jobtracker.sink.file_jvm.context=jvm
-#jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out
-#jobtracker.sink.file_mapred.context=mapred
-#jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out
+#resourcemanager.sink.file.filename=resourcemanager-metrics.out
 
-#tasktracker.sink.file.filename=tasktracker-metrics.out
+#nodemanager.sink.file.filename=nodemanager-metrics.out
 
-#maptask.sink.file.filename=maptask-metrics.out
+#mrappmaster.sink.file.filename=mrappmaster-metrics.out
 
-#reducetask.sink.file.filename=reducetask-metrics.out
+#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
 
+# the following example split metrics of different
+# context to different sinks (in this case files)
+#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_jvm.context=jvm
+#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
+#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_mapred.context=mapred
+#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
 
 #
 # Below are for sending metrics to Ganglia
@@ -56,11 +59,10 @@
 
 #datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
 
-#jobtracker.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#tasktracker.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
 
-#maptask.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
 
-#reducetask.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
 
+#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649

+ 0 - 2
hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml

@@ -1,8 +1,6 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
-
- Copyright 2011 The Apache Software Foundation
  
  Licensed to the Apache Software Foundation (ASF) under one
  or more contributor license agreements.  See the NOTICE file

+ 0 - 2
hadoop-common-project/hadoop-common/src/main/conf/log4j.properties

@@ -1,5 +1,3 @@
-# Copyright 2011 The Apache Software Foundation
-# 
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information

+ 666 - 0
hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html

@@ -1,4 +1,670 @@
 <META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop  2.4.1 Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop  2.4.1 Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 2.4.0</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2081">YARN-2081</a>.
+     Minor bug reported by Hong Zhiguo and fixed by Hong Zhiguo (applications/distributed-shell)<br>
+     <b>TestDistributedShell fails after YARN-1962</b><br>
+     <blockquote>java.lang.AssertionError: expected:&lt;1&gt; but was:&lt;0&gt;
+        at org.junit.Assert.fail(Assert.java:88)
+        at org.junit.Assert.failNotEquals(Assert.java:743)
+        at org.junit.Assert.assertEquals(Assert.java:118)
+        at org.junit.Assert.assertEquals(Assert.java:555)
+        at org.junit.Assert.assertEquals(Assert.java:542)
+        at org.apache.hadoop.yarn.applications.distributedshell.TestDistributedShell.testDSShell(TestDistributedShell.java:198)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2066">YARN-2066</a>.
+     Minor bug reported by Ted Yu and fixed by Hong Zhiguo <br>
+     <b>Wrong field is referenced in GetApplicationsRequestPBImpl#mergeLocalToBuilder()</b><br>
+     <blockquote>{code}
+    if (this.finish != null) {
+      builder.setFinishBegin(start.getMinimumLong());
+      builder.setFinishEnd(start.getMaximumLong());
+    }
+{code}
+this.finish should be referenced in the if block.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2053">YARN-2053</a>.
+     Major sub-task reported by Sumit Mohanty and fixed by Wangda Tan (resourcemanager)<br>
+     <b>Slider AM fails to restart: NPE in RegisterApplicationMasterResponseProto$Builder.addAllNmTokensFromPreviousAttempts</b><br>
+     <blockquote>Slider AppMaster restart fails with the following:
+{code}
+org.apache.hadoop.yarn.proto.YarnServiceProtos$RegisterApplicationMasterResponseProto$Builder.addAllNmTokensFromPreviousAttempts(YarnServiceProtos.java:2700)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2016">YARN-2016</a>.
+     Major bug reported by Venkat Ranganathan and fixed by Junping Du (resourcemanager)<br>
+     <b>Yarn getApplicationRequest start time range is not honored</b><br>
+     <blockquote>When we query for the previous applications by creating an instance of GetApplicationsRequest and setting the start time range and application tag, we see that the start range provided is not honored and all applications with the tag are returned
+
+Attaching a reproducer.
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1986">YARN-1986</a>.
+     Critical bug reported by Jon Bringhurst and fixed by Hong Zhiguo <br>
+     <b>In Fifo Scheduler, node heartbeat in between creating app and attempt causes NPE</b><br>
+     <blockquote>After upgrade from 2.2.0 to 2.4.0, NPE on first job start.
+
+-After RM was restarted, the job runs without a problem.-
+
+{noformat}
+19:11:13,441 FATAL ResourceManager:600 - Error in handling event type NODE_UPDATE to the scheduler
+java.lang.NullPointerException
+	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.assignContainers(FifoScheduler.java:462)
+	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.nodeUpdate(FifoScheduler.java:714)
+	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.handle(FifoScheduler.java:743)
+	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.handle(FifoScheduler.java:104)
+	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessor.run(ResourceManager.java:591)
+	at java.lang.Thread.run(Thread.java:744)
+19:11:13,443  INFO ResourceManager:604 - Exiting, bbye..
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1976">YARN-1976</a>.
+     Major bug reported by Yesha Vora and fixed by Junping Du <br>
+     <b>Tracking url missing http protocol for FAILED application</b><br>
+     <blockquote>Run yarn application -list -appStates FAILED,  It does not print http protocol name like FINISHED apps.
+
+{noformat}
+-bash-4.1$ yarn application -list -appStates FINISHED,FAILED,KILLED
+14/04/15 23:55:07 INFO client.RMProxy: Connecting to ResourceManager at host
+Total number of applications (application-types: [] and states: [FINISHED, FAILED, KILLED]):4
+                Application-Id	    Application-Name	    Application-Type	      User	     Queue	             State	       Final-State	       Progress	                       Tracking-URL
+application_1397598467870_0004	           Sleep job	           MAPREDUCE	    hrt_qa	   default	          FINISHED	         SUCCEEDED	           100%	http://host:19888/jobhistory/job/job_1397598467870_0004
+application_1397598467870_0003	           Sleep job	           MAPREDUCE	    hrt_qa	   default	          FINISHED	         SUCCEEDED	           100%	http://host:19888/jobhistory/job/job_1397598467870_0003
+application_1397598467870_0002	           Sleep job	           MAPREDUCE	    hrt_qa	   default	            FAILED	            FAILED	           100%	host:8088/cluster/app/application_1397598467870_0002
+application_1397598467870_0001	          word count	           MAPREDUCE	    hrt_qa	   default	          FINISHED	         SUCCEEDED	           100%	http://host:19888/jobhistory/job/job_1397598467870_0001
+{noformat}
+
+It only prints 'host:8088/cluster/app/application_1397598467870_0002' instead 'http://host:8088/cluster/app/application_1397598467870_0002' </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1975">YARN-1975</a>.
+     Major bug reported by Nathan Roberts and fixed by Mit Desai (resourcemanager)<br>
+     <b>Used resources shows escaped html in CapacityScheduler and FairScheduler page</b><br>
+     <blockquote>Used resources displays as &amp;amp;lt;memory:1111, vCores;&amp;amp;gt; with capacity scheduler
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1962">YARN-1962</a>.
+     Major sub-task reported by Mohammad Kamrul Islam and fixed by Mohammad Kamrul Islam <br>
+     <b>Timeline server is enabled by default</b><br>
+     <blockquote>Since Timeline server is not matured and secured yet, enabling  it by default might create some confusion.
+
+We were playing with 2.4.0 and found a lot of exceptions for distributed shell example related to connection refused error. Btw, we didn't run TS because it is not secured yet.
+
+Although it is possible to explicitly turn it off through yarn-site config. In my opinion,  this extra change for this new service is not worthy at this point,.  
+
+This JIRA is to turn it off by default.
+If there is an agreement, i can put a simple patch about this.
+
+{noformat}
+14/04/17 23:24:33 ERROR impl.TimelineClientImpl: Failed to get the response from the timeline server.
+com.sun.jersey.api.client.ClientHandlerException: java.net.ConnectException: Connection refused
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler.handle(URLConnectionClientHandler.java:149)
+	at com.sun.jersey.api.client.Client.handle(Client.java:648)
+	at com.sun.jersey.api.client.WebResource.handle(WebResource.java:670)
+	at com.sun.jersey.api.client.WebResource.access$200(WebResource.java:74)
+	at com.sun.jersey.api.client.WebResource$Builder.post(WebResource.java:563)
+	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPostingEntities(TimelineClientImpl.java:131)
+	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.putEntities(TimelineClientImpl.java:104)
+	at org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster.publishApplicationAttemptEvent(ApplicationMaster.java:1072)
+	at org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster.run(ApplicationMaster.java:515)
+	at org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster.main(ApplicationMaster.java:281)
+Caused by: java.net.ConnectException: Connection refused
+	at java.net.PlainSocketImpl.socketConnect(Native Method)
+	at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:339)
+	at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:198)
+	at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:182)
+	at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
+	at java.net.Socket.connect(Socket.java:579)
+	at java.net.Socket.connect(Socket.java:528)
+	at sun.net.NetworkClient.doConnect(NetworkClient.java:180)
+	at sun.net.www.http.HttpClient.openServer(HttpClient.java:432)
+	at sun.net.www.http.HttpClient.openServer(HttpClient.java:527)
+	at sun.net.www.http.HttpClient.&lt;init&gt;(HttpClient.java:211)
+	at sun.net.www.http.HttpClient.New(HttpClient.java:308)
+	at sun.net.www.http.HttpClient.New(HttpClient.java:326)
+	at sun.net.www.protocol.http.HttpURLConnection.getNewHttpClient(HttpURLConnection.java:996)
+	at sun.net.www.protocol.http.HttpURLConnection.plainConnect(HttpURLConnection.java:932)
+	at sun.net.www.protocol.http.HttpURLConnection.connect(HttpURLConnection.java:850)
+	at sun.net.www.protocol.http.HttpURLConnection.getOutputStream(HttpURLConnection.java:1091)
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler$1$1.getOutputStream(URLConnectionClientHandler.java:225)
+	at com.sun.jersey.api.client.CommittingOutputStream.commitWrite(CommittingOutputStream.java:117)
+	at com.sun.jersey.api.client.CommittingOutputStream.write(CommittingOutputStream.java:89)
+	at org.codehaus.jackson.impl.Utf8Generator._flushBuffer(Utf8Generator.java:1754)
+	at org.codehaus.jackson.impl.Utf8Generator.flush(Utf8Generator.java:1088)
+	at org.codehaus.jackson.map.ObjectMapper.writeValue(ObjectMapper.java:1354)
+	at org.codehaus.jackson.jaxrs.JacksonJsonProvider.writeTo(JacksonJsonProvider.java:527)
+	at com.sun.jersey.api.client.RequestWriter.writeRequestEntity(RequestWriter.java:300)
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler._invoke(URLConnectionClientHandler.java:204)
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler.handle(URLConnectionClientHandler.java:147)
+	... 9 more
+
+{noformat}
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1957">YARN-1957</a>.
+     Major sub-task reported by Carlo Curino and fixed by Carlo Curino (resourcemanager)<br>
+     <b>ProportionalCapacityPreemptionPolicy handling of corner cases...</b><br>
+     <blockquote>The current version of ProportionalCapacityPreemptionPolicy should be improved to deal with the following two scenarios:
+1) when rebalancing over-capacity allocations, it potentially preempts without considering the maxCapacity constraints of a queue (i.e., preempting possibly more than strictly necessary)
+2) a zero capacity queue is preempted even if there is no demand (coherent with old use of zero-capacity to disabled queues)
+
+The proposed patch fixes both issues, and introduce few new test cases.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1947">YARN-1947</a>.
+     Major test reported by Jian He and fixed by Jian He <br>
+     <b>TestRMDelegationTokens#testRMDTMasterKeyStateOnRollingMasterKey is failing intermittently</b><br>
+     <blockquote>java.lang.AssertionError: null
+	at org.junit.Assert.fail(Assert.java:92)
+	at org.junit.Assert.assertTrue(Assert.java:43)
+	at org.junit.Assert.assertTrue(Assert.java:54)
+	at org.apache.hadoop.yarn.server.resourcemanager.security.TestRMDelegationTokens.testRMDTMasterKeyStateOnRollingMasterKey(TestRMDelegationTokens.java:117)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1934">YARN-1934</a>.
+     Blocker bug reported by Rohith and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>Potential NPE in ZKRMStateStore caused by handling Disconnected event from ZK.</b><br>
+     <blockquote>For ZK disconnected event , zkClient is set to null. It is very much prone to throw NPE.
+
+{noformat}
+        case Disconnected:
+          LOG.info("ZKRMStateStore Session disconnected");
+          oldZkClient = zkClient;
+          zkClient = null;
+          break;
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1933">YARN-1933</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>TestAMRestart and TestNodeHealthService failing sometimes on Windows</b><br>
+     <blockquote>TestNodeHealthService failures:
+testNodeHealthScript(org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService)  Time elapsed: 1.405 sec  &lt;&lt;&lt; ERROR!
+java.io.FileNotFoundException: C:\Users\Administrator\Documents\hadoop-common\hadoop-yarn-project\hadoop-yarn\hadoop-yarn-server\hadoop-yarn-server-nodemanager\target\org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService-localDir\failingscript.cmd (The process cannot access the file because it is being used by another process)
+	at java.io.FileOutputStream.open(Native Method)
+	at java.io.FileOutputStream.&lt;init&gt;(FileOutputStream.java:221)
+	at java.io.FileOutputStream.&lt;init&gt;(FileOutputStream.java:171)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService.writeNodeHealthScriptFile(TestNodeHealthService.java:82)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService.testNodeHealthScript(TestNodeHealthService.java:154)
+
+testNodeHealthScriptShouldRun(org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService)  Time elapsed: 0 sec  &lt;&lt;&lt; ERROR!
+java.io.FileNotFoundException: C:\Users\Administrator\Documents\hadoop-common\hadoop-yarn-project\hadoop-yarn\hadoop-yarn-server\hadoop-yarn-server-nodemanager\target\org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService-localDir\failingscript.cmd (Access is denied)
+	at java.io.FileOutputStream.open(Native Method)
+	at java.io.FileOutputStream.&lt;init&gt;(FileOutputStream.java:221)
+	at java.io.FileOutputStream.&lt;init&gt;(FileOutputStream.java:171)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService.writeNodeHealthScriptFile(TestNodeHealthService.java:82)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService.testNodeHealthScriptShouldRun(TestNodeHealthService.java:103)
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1932">YARN-1932</a>.
+     Blocker bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>Javascript injection on the job status page</b><br>
+     <blockquote>Scripts can be injected into the job status page as the diagnostics field is
+not sanitized. Whatever string you set there will show up to the jobs page as it is ... ie. if you put any script commands, they will be executed in the browser of the user who is opening the page.
+
+We need escaping the diagnostic string in order to not run the scripts.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1931">YARN-1931</a>.
+     Blocker bug reported by Thomas Graves and fixed by Sandy Ryza (applications)<br>
+     <b>Private API change in YARN-1824 in 2.4 broke compatibility with previous releases</b><br>
+     <blockquote>YARN-1824 broke compatibility with previous 2.x releases by changes the API's in org.apache.hadoop.yarn.util.Apps.{setEnvFromInputString,addToEnvironment}  The old api should be added back in.
+
+This affects any ApplicationMasters who were using this api.  It also breaks previously built MapReduce libraries from working with the new Yarn release as MR uses this api. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1929">YARN-1929</a>.
+     Blocker bug reported by Rohith and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>DeadLock in RM when automatic failover is enabled.</b><br>
+     <blockquote>Dead lock detected  in RM when automatic failover is enabled.
+
+
+{noformat}
+Found one Java-level deadlock:
+=============================
+"Thread-2":
+  waiting to lock monitor 0x00007fb514303cf0 (object 0x00000000ef153fd0, a org.apache.hadoop.ha.ActiveStandbyElector),
+  which is held by "main-EventThread"
+"main-EventThread":
+  waiting to lock monitor 0x00007fb514750a48 (object 0x00000000ef154020, a org.apache.hadoop.yarn.server.resourcemanager.EmbeddedElectorService),
+  which is held by "Thread-2"
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1928">YARN-1928</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>TestAMRMRPCNodeUpdates fails occasionally</b><br>
+     <blockquote>{code}
+junit.framework.AssertionFailedError: expected:&lt;0&gt; but was:&lt;4&gt;
+	at junit.framework.Assert.fail(Assert.java:50)
+	at junit.framework.Assert.failNotEquals(Assert.java:287)
+	at junit.framework.Assert.assertEquals(Assert.java:67)
+	at junit.framework.Assert.assertEquals(Assert.java:199)
+	at junit.framework.Assert.assertEquals(Assert.java:205)
+	at org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.TestAMRMRPCNodeUpdates.testAMRMUnusableNodes(TestAMRMRPCNodeUpdates.java:136)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1926">YARN-1926</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>DistributedShell unit tests fail on Windows</b><br>
+     <blockquote>Couple of unit tests for the DistributedShell fail on Windows - specifically testDSShellWithShellScript and testDSRestartWithPreviousRunningContainers </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1924">YARN-1924</a>.
+     Critical bug reported by Arpit Gupta and fixed by Jian He <br>
+     <b>STATE_STORE_OP_FAILED happens when ZKRMStateStore tries to update app(attempt) before storing it</b><br>
+     <blockquote>Noticed on a HA cluster Both RM shut down with this error. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1920">YARN-1920</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>TestFileSystemApplicationHistoryStore.testMissingApplicationAttemptHistoryData fails in windows</b><br>
+     <blockquote>Though this was only failing in Windows, after debugging, I realized that the test fails because we are leaking a file-handle in the history service.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1914">YARN-1914</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>Test TestFSDownload.testDownloadPublicWithStatCache fails on Windows</b><br>
+     <blockquote>The TestFSDownload.testDownloadPublicWithStatCache test in hadoop-yarn-common consistently fails on Windows environments.
+
+The root cause is that the test checks for execute permission for all users on every ancestor of the target directory. In windows, by default, group "Everyone" has no permissions on any directory in the install drive. It's unreasonable to expect this test to pass and we should skip it on Windows.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1910">YARN-1910</a>.
+     Major bug reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>TestAMRMTokens fails on windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1908">YARN-1908</a>.
+     Major bug reported by Tassapol Athiapinya and fixed by Vinod Kumar Vavilapalli (applications/distributed-shell)<br>
+     <b>Distributed shell with custom script has permission error.</b><br>
+     <blockquote>Create test1.sh having "pwd".
+
+Run this command as user1:
+hadoop jar /usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell.jar -jar /usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell.jar -shell_script test1.sh
+
+NM is run by yarn user. An exception is thrown because yarn user has no permissions on custom script in hdfs path. The custom script is created with distributed shell app.
+{code}
+Caused by: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.security.AccessControlException): Permission denied: user=yarn, access=WRITE, inode="/user/user1/DistributedShell/70":user1:user1:drwxr-xr-x
+	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkFsPermission(FSPermissionChecker.java:265)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1907">YARN-1907</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestRMApplicationHistoryWriter#testRMWritingMassiveHistory runs slow and intermittently fails</b><br>
+     <blockquote>The test has 10000 containers that it tries to cleanup.
+The cleanup has a timeout of 20000ms in which the test sometimes cannot do the cleanup completely and gives out an Assertion Failure.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1905">YARN-1905</a>.
+     Trivial test reported by Chris Nauroth and fixed by Chris Nauroth (nodemanager)<br>
+     <b>TestProcfsBasedProcessTree must only run on Linux.</b><br>
+     <blockquote>The tests in {{TestProcfsBasedProcessTree}} only make sense on Linux, where the process tree calculations are based on reading the /proc file system.  Right now, not all of the individual tests are skipped when the OS is not Linux.  This patch will make it consistent.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1903">YARN-1903</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Killing Container on NEW and LOCALIZING will result in exitCode and diagnostics not set</b><br>
+     <blockquote>The container status after stopping container is not expected.
+{code}
+java.lang.AssertionError: 4: 
+	at org.junit.Assert.fail(Assert.java:93)
+	at org.junit.Assert.assertTrue(Assert.java:43)
+	at org.apache.hadoop.yarn.client.api.impl.TestNMClient.testGetContainerStatus(TestNMClient.java:382)
+	at org.apache.hadoop.yarn.client.api.impl.TestNMClient.testContainerManagement(TestNMClient.java:346)
+	at org.apache.hadoop.yarn.client.api.impl.TestNMClient.testNMClient(TestNMClient.java:226)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1898">YARN-1898</a>.
+     Major sub-task reported by Yesha Vora and fixed by Xuan Gong (resourcemanager)<br>
+     <b>Standby RM's conf, stacks, logLevel, metrics, jmx and logs links are redirecting to Active RM</b><br>
+     <blockquote>Standby RM links /conf, /stacks, /logLevel, /metrics, /jmx is redirected to Active RM.
+
+It should not be redirected to Active RM</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1892">YARN-1892</a>.
+     Minor improvement reported by Siddharth Seth and fixed by Jian He (scheduler)<br>
+     <b>Excessive logging in RM</b><br>
+     <blockquote>Mostly in the CS I believe
+
+{code}
+ INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt: Application application_1395435468498_0011 reserved container container_1395435468498_0011_01_000213 on node host:  #containers=5 available=4096 used=20960, currently has 1 at priority 4; currentReservation 4096
+{code}
+
+{code}
+INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue: hive2 usedResources: &lt;memory:20480, vCores:5&gt; clusterResources: &lt;memory:81920, vCores:16&gt; currentCapacity 0.25 required &lt;memory:4096, vCores:1&gt; potentialNewCapacity: 0.255 (  max-capacity: 0.25)
+{code}
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1883">YARN-1883</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestRMAdminService fails due to inconsistent entries in UserGroups</b><br>
+     <blockquote>testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider fails with the following error:
+{noformat}
+java.lang.AssertionError: null
+	at org.junit.Assert.fail(Assert.java:92)
+	at org.junit.Assert.assertTrue(Assert.java:43)
+	at org.junit.Assert.assertTrue(Assert.java:54)
+	at org.apache.hadoop.yarn.server.resourcemanager.TestRMAdminService.testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider(TestRMAdminService.java:421)
+	at org.apache.hadoop.yarn.server.resourcemanager.TestRMAdminService.testOrder(TestRMAdminService.java:104)
+{noformat}
+
+Line Numbers will be inconsistent as I was testing to run it in a particular order. But the Line on which the failure occurs is
+{code}
+Assert.assertTrue(groupBefore.contains("test_group_A")
+        &amp;&amp; groupBefore.contains("test_group_B")
+        &amp;&amp; groupBefore.contains("test_group_C") &amp;&amp; groupBefore.size() == 3);
+{code}
+
+testRMInitialsWithFileSystemBasedConfigurationProvider() and
+testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider()
+calls the function {{MockUnixGroupsMapping.updateGroups();}} which changes the list of userGroups.
+
+testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider() tries to verify the groups before changing it and fails if testRMInitialsWithFileSystemBasedConfigurationProvider() already ran and made the changes.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1861">YARN-1861</a>.
+     Blocker sub-task reported by Arpit Gupta and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>Both RM stuck in standby mode when automatic failover is enabled</b><br>
+     <blockquote>In our HA tests we noticed that the tests got stuck because both RM's got into standby state and no one became active.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1837">YARN-1837</a>.
+     Major bug reported by Tsuyoshi OZAWA and fixed by Hong Zhiguo <br>
+     <b>TestMoveApplication.testMoveRejectedByScheduler randomly fails</b><br>
+     <blockquote>TestMoveApplication#testMoveRejectedByScheduler fails because of NullPointerException. It looks caused by unhandled exception handling at server-side.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1750">YARN-1750</a>.
+     Major test reported by Ming Ma and fixed by Wangda Tan (nodemanager)<br>
+     <b>TestNodeStatusUpdater#testNMRegistration is incorrect in test case</b><br>
+     <blockquote>This test case passes. However, the test output log has
+
+java.lang.AssertionError: Number of applications should only be one! expected:&lt;1&gt; but was:&lt;2&gt;
+        at org.junit.Assert.fail(Assert.java:93)
+        at org.junit.Assert.failNotEquals(Assert.java:647)
+        at org.junit.Assert.assertEquals(Assert.java:128)
+        at org.junit.Assert.assertEquals(Assert.java:472)
+        at org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater$MyResourceTracker.nodeHeartbeat(TestNodeStatusUpdater.java:267)
+        at org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl$1.run(NodeStatusUpdaterImpl.java:469)
+        at java.lang.Thread.run(Thread.java:695)
+
+TestNodeStatusUpdater.java has invalid asserts.
+
+      } else if (heartBeatID == 3) {
+        // Checks on the RM end
+        Assert.assertEquals("Number of applications should only be one!", 1,
+            appToContainers.size());
+        Assert.assertEquals("Number of container for the app should be two!",
+            2, appToContainers.get(appId2).size());
+
+
+We should fix the assert and add more check to the test.
+
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1701">YARN-1701</a>.
+     Major sub-task reported by Gera Shegalov and fixed by Tsuyoshi OZAWA <br>
+     <b>Improve default paths of timeline store and generic history store</b><br>
+     <blockquote>When I enable AHS via yarn.ahs.enabled, the app history is still not visible in AHS webUI. This is due to NullApplicationHistoryStore as yarn.resourcemanager.history-writer.class. It would be good to have just one key to enable basic functionality.
+
+yarn.ahs.fs-history-store.uri uses {code}${hadoop.log.dir}{code}, which is local file system location. However, FileSystemApplicationHistoryStore uses DFS by default.  </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1696">YARN-1696</a>.
+     Blocker sub-task reported by Karthik Kambatla and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+     <b>Document RM HA</b><br>
+     <blockquote>Add documentation for RM HA. Marking this a blocker for 2.4 as this is required to call RM HA Stable and ready for public consumption. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1281">YARN-1281</a>.
+     Major test reported by Karthik Kambatla and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+     <b>TestZKRMStateStoreZKClientConnections fails intermittently</b><br>
+     <blockquote>The test fails intermittently - haven't been able to reproduce the failure deterministically. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1201">YARN-1201</a>.
+     Minor bug reported by Nemon Lou and fixed by Wangda Tan (resourcemanager)<br>
+     <b>TestAMAuthorization fails with local hostname cannot be resolved</b><br>
+     <blockquote>When hostname is 158-1-131-10, TestAMAuthorization fails.
+{code}
+Running org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization
+Tests run: 4, Failures: 0, Errors: 2, Skipped: 0, Time elapsed: 14.034 sec &lt;&lt;&lt; FAILURE! - in org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization
+testUnauthorizedAccess[0](org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization)  Time elapsed: 3.952 sec  &lt;&lt;&lt; ERROR!
+java.lang.NullPointerException: null
+        at org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.testUnauthorizedAccess(TestAMAuthorization.java:284)
+
+testUnauthorizedAccess[1](org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization)  Time elapsed: 3.116 sec  &lt;&lt;&lt; ERROR!
+java.lang.NullPointerException: null
+        at org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.testUnauthorizedAccess(TestAMAuthorization.java:284)
+
+
+Results :
+
+Tests in error:
+  TestAMAuthorization.testUnauthorizedAccess:284 NullPointer
+  TestAMAuthorization.testUnauthorizedAccess:284 NullPointer
+
+Tests run: 4, Failures: 0, Errors: 2, Skipped: 0
+
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5843">MAPREDUCE-5843</a>.
+     Major test reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>TestMRKeyValueTextInputFormat failing on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5841">MAPREDUCE-5841</a>.
+     Major bug reported by Sangjin Lee and fixed by Sangjin Lee (mrv2)<br>
+     <b>uber job doesn't terminate on getting mapred job kill</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5835">MAPREDUCE-5835</a>.
+     Critical bug reported by Ming Ma and fixed by Ming Ma <br>
+     <b>Killing Task might cause the job to go to ERROR state</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5833">MAPREDUCE-5833</a>.
+     Major test reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>TestRMContainerAllocator fails occasionally</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5832">MAPREDUCE-5832</a>.
+     Major bug reported by Jian He and fixed by Vinod Kumar Vavilapalli <br>
+     <b>Few tests in TestJobClient fail on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5830">MAPREDUCE-5830</a>.
+     Blocker bug reported by Jason Lowe and fixed by Akira AJISAKA <br>
+     <b>HostUtil.getTaskLogUrl is not backwards binary compatible with 2.3</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5828">MAPREDUCE-5828</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>TestMapReduceJobControl fails on JDK 7 + Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5827">MAPREDUCE-5827</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>TestSpeculativeExecutionWithMRApp fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5826">MAPREDUCE-5826</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>TestHistoryServerFileSystemStateStoreService.testTokenStore fails in windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5824">MAPREDUCE-5824</a>.
+     Major bug reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>TestPipesNonJavaInputFormat.testFormat fails in windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5821">MAPREDUCE-5821</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (performance , task)<br>
+     <b>IFile merge allocates new byte array for every value</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5818">MAPREDUCE-5818</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>hsadmin cmd is missing in mapred.cmd</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5815">MAPREDUCE-5815</a>.
+     Blocker bug reported by Gera Shegalov and fixed by Akira AJISAKA (client , mrv2)<br>
+     <b>Fix NPE in TestMRAppMaster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5714">MAPREDUCE-5714</a>.
+     Major bug reported by Jinghui Wang and fixed by Jinghui Wang (test)<br>
+     <b>TestMRAppComponentDependencies causes surefire to exit without saying proper goodbye</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3191">MAPREDUCE-3191</a>.
+     Trivial bug reported by Todd Lipcon and fixed by Chen He <br>
+     <b>docs for map output compression incorrectly reference SequenceFile</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6527">HDFS-6527</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Edit log corruption due to deferred INode removal</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6411">HDFS-6411</a>.
+     Major bug reported by Zhongyi Xie and fixed by Brandon Li (nfs)<br>
+     <b>nfs-hdfs-gateway mount raises I/O error and hangs when an unauthorized user attempts to access it</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6402">HDFS-6402</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode)<br>
+     <b>Suppress findbugs warning for failure to override equals and hashCode in FsAclPermission.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6397">HDFS-6397</a>.
+     Critical bug reported by Mohammad Kamrul Islam and fixed by Mohammad Kamrul Islam <br>
+     <b>NN shows inconsistent value in deadnode count </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6362">HDFS-6362</a>.
+     Blocker bug reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode)<br>
+     <b>InvalidateBlocks is inconsistent in usage of DatanodeUuid and StorageID</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6361">HDFS-6361</a>.
+     Major bug reported by Yongjun Zhang and fixed by Yongjun Zhang (nfs)<br>
+     <b>TestIdUserGroup.testUserUpdateSetting failed due to out of range nfsnobody Id</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6340">HDFS-6340</a>.
+     Blocker bug reported by Rahul Singhal and fixed by Rahul Singhal (datanode)<br>
+     <b>DN can't finalize upgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6329">HDFS-6329</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>WebHdfs does not work if HA is enabled on NN but logical URI is not configured.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6326">HDFS-6326</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Chris Nauroth (webhdfs)<br>
+     <b>WebHdfs ACL compatibility is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6325">HDFS-6325</a>.
+     Major bug reported by Konstantin Shvachko and fixed by Keith Pak (namenode)<br>
+     <b>Append should fail if the last block has insufficient number of replicas</b><br>
+     <blockquote>I have committed the fix to the trunk, branch-2, and branch-2.4 respectively. Thanks Keith!</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6313">HDFS-6313</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Kihwal Lee (webhdfs)<br>
+     <b>WebHdfs may use the wrong NN when configured for multiple HA NNs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6245">HDFS-6245</a>.
+     Major bug reported by Arpit Gupta and fixed by Arpit Agarwal <br>
+     <b>datanode fails to start with a bad disk even when failed volumes is set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6236">HDFS-6236</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode)<br>
+     <b>ImageServlet should use Time#monotonicNow to measure latency.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6235">HDFS-6235</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode , test)<br>
+     <b>TestFileJournalManager can fail on Windows due to file locking if tests run out of order.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6234">HDFS-6234</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (datanode , test)<br>
+     <b>TestDatanodeConfig#testMemlockLimit fails on Windows due to invalid file path.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6232">HDFS-6232</a>.
+     Major bug reported by Stephen Chu and fixed by Akira AJISAKA (tools)<br>
+     <b>OfflineEditsViewer throws a NPE on edits containing ACL modifications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6231">HDFS-6231</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (hdfs-client)<br>
+     <b>DFSClient hangs infinitely if using hedged reads and all eligible datanodes die.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6229">HDFS-6229</a>.
+     Major bug reported by Jing Zhao and fixed by Jing Zhao (ha)<br>
+     <b>Race condition in failover can cause RetryCache fail to work</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6215">HDFS-6215</a>.
+     Minor bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Wrong error message for upgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6209">HDFS-6209</a>.
+     Minor bug reported by Arpit Agarwal and fixed by Arpit Agarwal (test)<br>
+     <b>Fix flaky test TestValidateConfigurationSettings.testThatDifferentRPCandHttpPortsAreOK</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6208">HDFS-6208</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (datanode)<br>
+     <b>DataNode caching can leak file descriptors.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6206">HDFS-6206</a>.
+     Major bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze <br>
+     <b>DFSUtil.substituteForWildcardAddress may throw NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6204">HDFS-6204</a>.
+     Minor bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (test)<br>
+     <b>TestRBWBlockInvalidation may fail</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6198">HDFS-6198</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (datanode)<br>
+     <b>DataNode rolling upgrade does not correctly identify current block pool directory and replace with trash on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6197">HDFS-6197</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode)<br>
+     <b>Rolling upgrade rollback on Windows can fail attempting to rename edit log segment files to a destination that already exists.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6189">HDFS-6189</a>.
+     Major test reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>Multiple HDFS tests fail on Windows attempting to use a test root path containing a colon.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4052">HDFS-4052</a>.
+     Minor improvement reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>BlockManager#invalidateWork should print logs outside the lock</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2882">HDFS-2882</a>.
+     Major bug reported by Todd Lipcon and fixed by Vinayakumar B (datanode)<br>
+     <b>DN continues to start up, even if block pool fails to initialize</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10612">HADOOP-10612</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>NFS failed to refresh the user group id mapping table</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10562">HADOOP-10562</a>.
+     Critical bug reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Namenode exits on exception without printing stack trace in AbstractDelegationTokenSecretManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10527">HADOOP-10527</a>.
+     Major bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Fix incorrect return code and allow more retries on EINTR</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10522">HADOOP-10522</a>.
+     Critical bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>JniBasedUnixGroupMapping mishandles errors</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10490">HADOOP-10490</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>TestMapFile and TestBloomMapFile leak file descriptors.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10473">HADOOP-10473</a>.
+     Minor bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (test)<br>
+     <b>TestCallQueueManager is still flaky</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10466">HADOOP-10466</a>.
+     Minor improvement reported by Nicolas Liochon and fixed by Nicolas Liochon (security)<br>
+     <b>Lower the log level in UserGroupInformation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10456">HADOOP-10456</a>.
+     Major bug reported by Nishkam Ravi and fixed by Nishkam Ravi (conf)<br>
+     <b>Bug in Configuration.java exposed by Spark (ConcurrentModificationException)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10455">HADOOP-10455</a>.
+     Major bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (ipc)<br>
+     <b>When there is an exception, ipc.Server should first check whether it is an terse exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8826">HADOOP-8826</a>.
+     Minor bug reported by Robert Joseph Evans and fixed by Mit Desai <br>
+     <b>Docs still refer to 0.20.205 as stable line</b><br>
+     <blockquote></blockquote></li>
+</ul>
+</body></html>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
 <title>Hadoop  2.4.0 Release Notes</title>
 <STYLE type="text/css">
 	H1 {font-family: sans-serif}

+ 166 - 12
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -78,6 +78,9 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.alias.CredentialProvider;
+import org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.util.StringUtils;
@@ -107,8 +110,9 @@ import com.google.common.base.Preconditions;
  *
  * <p>Unless explicitly turned off, Hadoop by default specifies two 
  * resources, loaded in-order from the classpath: <ol>
- * <li><tt><a href="{@docRoot}/../core-default.html">core-default.xml</a>
- * </tt>: Read-only defaults for hadoop.</li>
+ * <li><tt>
+ * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ * core-default.xml</a></tt>: Read-only defaults for hadoop.</li>
  * <li><tt>core-site.xml</tt>: Site-specific configuration for a given hadoop
  * installation.</li>
  * </ol>
@@ -423,7 +427,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       new DeprecationDelta("fs.default.name", 
         CommonConfigurationKeys.FS_DEFAULT_NAME_KEY),
       new DeprecationDelta("dfs.umaskmode",
-        CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY)
+        CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY),
+      new DeprecationDelta("dfs.nfs.exports.allowed.hosts",
+          CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY)
     };
 
   /**
@@ -566,6 +572,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    */
   private String[] handleDeprecation(DeprecationContext deprecations,
       String name) {
+    if (null != name) {
+      name = name.trim();
+    }
     ArrayList<String > names = new ArrayList<String>();
 	if (isDeprecated(name)) {
       DeprecatedKeyInfo keyInfo = deprecations.getDeprecatedKeyMap().get(name);
@@ -797,14 +806,16 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     reloadConfiguration();
   }
   
-  private static Pattern varPat = Pattern.compile("\\$\\{[^\\}\\$\u0020]+\\}");
-  private static int MAX_SUBST = 20;
+  private static final Pattern VAR_PATTERN =
+      Pattern.compile("\\$\\{[^\\}\\$\u0020]+\\}");
+
+  private static final int MAX_SUBST = 20;
 
   private String substituteVars(String expr) {
     if (expr == null) {
       return null;
     }
-    Matcher match = varPat.matcher("");
+    Matcher match = VAR_PATTERN.matcher("");
     String eval = expr;
     Set<String> evalSet = new HashSet<String>();
     for(int s=0; s<MAX_SUBST; s++) {
@@ -841,12 +852,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   /**
    * Get the value of the <code>name</code> property, <code>null</code> if
    * no such property exists. If the key is deprecated, it returns the value of
-   * the first key which replaces the deprecated key and is not null
+   * the first key which replaces the deprecated key and is not null.
    * 
    * Values are processed for <a href="#VariableExpansion">variable expansion</a> 
    * before being returned. 
    * 
-   * @param name the property name.
+   * @param name the property name; it will be trimmed before the value is looked up.
    * @return the value of the <code>name</code> or its replacing property, 
    *         or null if no such property exists.
    */
@@ -950,7 +961,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   /** 
    * Set the <code>value</code> of the <code>name</code> property. If 
    * <code>name</code> is deprecated or there is a deprecated name associated to it,
-   * it sets the value to both names.
+   * it sets the value to both names. The name will be trimmed before it is
+   * put into the configuration.
    * 
    * @param name property name.
    * @param value property value.
@@ -962,7 +974,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   /** 
    * Set the <code>value</code> of the <code>name</code> property. If 
    * <code>name</code> is deprecated, it also sets the <code>value</code> to
-   * the keys that replace the deprecated key.
+   * the keys that replace the deprecated key. The name will be trimmed before
+   * it is put into the configuration.
    *
    * @param name property name.
    * @param value property value.
@@ -977,6 +990,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     Preconditions.checkArgument(
         value != null,
         "The value of property " + name + " must not be null");
+    name = name.trim();
     DeprecationContext deprecations = deprecationContext.get();
     if (deprecations.getDeprecatedKeyMap().isEmpty()) {
       getProps();
@@ -1062,7 +1076,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * If no such property exists,
    * then <code>defaultValue</code> is returned.
    * 
-   * @param name property name.
+   * @param name property name; it will be trimmed before the value is looked up.
    * @param defaultValue default value.
    * @return property value, or <code>defaultValue</code> if the property 
    *         doesn't exist.                    
@@ -1757,6 +1771,111 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     set(name, StringUtils.arrayToString(values));
   }
 
+  /**
+   * Get the value for a known password configuration element.
+   * In order to enable the elimination of clear text passwords in config,
+   * this method attempts to resolve the property name as an alias through
+   * the CredentialProvider API and conditionally falls back to config.
+   * @param name property name
+   * @return password
+   */
+  public char[] getPassword(String name) throws IOException {
+    char[] pass = null;
+
+    pass = getPasswordFromCredentialProviders(name);
+
+    if (pass == null) {
+      pass = getPasswordFromConfig(name);
+    }
+
+    return pass;
+  }
+
+  /**
+   * Try to resolve the provided element name as a credential provider
+   * alias.
+   * @param name alias of the provisioned credential
+   * @return password or null if not found
+   * @throws IOException
+   */
+  protected char[] getPasswordFromCredentialProviders(String name)
+      throws IOException {
+    char[] pass = null;
+    try {
+      List<CredentialProvider> providers =
+          CredentialProviderFactory.getProviders(this);
+
+      if (providers != null) {
+        for (CredentialProvider provider : providers) {
+          try {
+            CredentialEntry entry = provider.getCredentialEntry(name);
+            if (entry != null) {
+              pass = entry.getCredential();
+              break;
+            }
+          }
+          catch (IOException ioe) {
+            throw new IOException("Can't get key " + name + " from key provider" +
+            		"of type: " + provider.getClass().getName() + ".", ioe);
+          }
+        }
+      }
+    }
+    catch (IOException ioe) {
+      throw new IOException("Configuration problem with provider path.", ioe);
+    }
+
+    return pass;
+  }
+
+  /**
+   * Fallback to clear text passwords in configuration.
+   * @param name
+   * @return clear text password or null
+   */
+  protected char[] getPasswordFromConfig(String name) {
+    char[] pass = null;
+    if (getBoolean(CredentialProvider.CLEAR_TEXT_FALLBACK, true)) {
+      String passStr = get(name);
+      if (passStr != null) {
+        pass = passStr.toCharArray();
+      }
+    }
+    return pass;
+  }
+
+  /**
+   * Get the socket address for <code>hostProperty</code> as an
+   * <code>InetSocketAddress</code>. If <code>hostProperty</code> is
+   * <code>null</code>, <code>addressProperty</code> will be used. This
+   * is useful for cases where we want to differentiate between the host
+   * bind address and the address clients should use to establish a connection.
+   *
+   * @param hostProperty bind host property name.
+   * @param addressProperty address property name.
+   * @param defaultAddressValue the default value
+   * @param defaultPort the default port
+   * @return InetSocketAddress
+   */
+  public InetSocketAddress getSocketAddr(
+      String hostProperty,
+      String addressProperty,
+      String defaultAddressValue,
+      int defaultPort) {
+
+    InetSocketAddress bindAddr = getSocketAddr(
+      addressProperty, defaultAddressValue, defaultPort);
+
+    final String host = get(hostProperty);
+
+    if (host == null || host.isEmpty()) {
+      return bindAddr;
+    }
+
+    return NetUtils.createSocketAddr(
+        host, bindAddr.getPort(), hostProperty);
+  }
+
   /**
    * Get the socket address for <code>name</code> property as a
    * <code>InetSocketAddress</code>.
@@ -1778,6 +1897,40 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   public void setSocketAddr(String name, InetSocketAddress addr) {
     set(name, NetUtils.getHostPortString(addr));
   }
+
+  /**
+   * Set the socket address a client can use to connect for the
+   * <code>name</code> property as a <code>host:port</code>.  The wildcard
+   * address is replaced with the local host's address. If the host and address
+   * properties are configured the host component of the address will be combined
+   * with the port component of the addr to generate the address.  This is to allow
+   * optional control over which host name is used in multi-home bind-host
+   * cases where a host can have multiple names
+   * @param hostProperty the bind-host configuration name
+   * @param addressProperty the service address configuration name
+   * @param defaultAddressValue the service default address configuration value
+   * @param addr InetSocketAddress of the service listener
+   * @return InetSocketAddress for clients to connect
+   */
+  public InetSocketAddress updateConnectAddr(
+      String hostProperty,
+      String addressProperty,
+      String defaultAddressValue,
+      InetSocketAddress addr) {
+
+    final String host = get(hostProperty);
+    final String connectHostPort = getTrimmed(addressProperty, defaultAddressValue);
+
+    if (host == null || host.isEmpty() || connectHostPort == null || connectHostPort.isEmpty()) {
+      //not our case, fall back to original logic
+      return updateConnectAddr(addressProperty, addr);
+    }
+
+    final String connectHost = connectHostPort.split(":")[0];
+    // Create connect address using client address hostname and server port.
+    return updateConnectAddr(addressProperty, NetUtils.createSocketAddrForHost(
+        connectHost, addr.getPort()));
+  }
   
   /**
    * Set the socket address a client can use to connect for the
@@ -2603,7 +2756,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
           item.getValue() instanceof String) {
         m = p.matcher((String)item.getKey());
         if(m.find()) { // match
-          result.put((String) item.getKey(), (String) item.getValue());
+          result.put((String) item.getKey(),
+              substituteVars(getProps().getProperty((String) item.getKey())));
         }
       }
     }
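
As a rough usage sketch of the password-resolution path added above (the credential provider path value and the alias are illustrative, not taken from this patch), getPassword() consults any configured credential providers first and only falls back to a clear-text value in the configuration when the CLEAR_TEXT_FALLBACK switch permits it:

import org.apache.hadoop.conf.Configuration;

public class GetPasswordSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical provider path: a jceks credential store on the local FS.
    conf.set("hadoop.security.credential.provider.path",
        "jceks://file/tmp/creds.jceks");
    // Resolves the alias against the configured providers first, and only
    // falls back to a clear-text value under the same property name when
    // CredentialProvider.CLEAR_TEXT_FALLBACK allows it.
    char[] pass = conf.getPassword("ssl.server.keystore.password");
    System.out.println(pass == null ? "password not found" : "password resolved");
  }
}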

+ 174 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/CachingKeyProvider.java

@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key;
+
+import java.io.IOException;
+import java.security.NoSuchAlgorithmException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+
+/**
+ * A <code>KeyProviderExtension</code> implementation providing a short-lived
+ * cache for <code>KeyVersions</code> and <code>Metadata</code> to avoid bursts
+ * of requests hitting the underlying <code>KeyProvider</code>.
+ */
+public class CachingKeyProvider extends
+    KeyProviderExtension<CachingKeyProvider.CacheExtension> {
+
+  static class CacheExtension implements KeyProviderExtension.Extension {
+    private final KeyProvider provider;
+    private LoadingCache<String, KeyVersion> keyVersionCache;
+    private LoadingCache<String, KeyVersion> currentKeyCache;
+    private LoadingCache<String, Metadata> keyMetadataCache;
+
+    CacheExtension(KeyProvider prov, long keyTimeoutMillis,
+        long currKeyTimeoutMillis) {
+      this.provider = prov;
+      keyVersionCache =
+          CacheBuilder.newBuilder().expireAfterAccess(keyTimeoutMillis,
+              TimeUnit.MILLISECONDS)
+              .build(new CacheLoader<String, KeyVersion>() {
+                @Override
+                public KeyVersion load(String key) throws Exception {
+                  KeyVersion kv = provider.getKeyVersion(key);
+                  if (kv == null) {
+                    throw new KeyNotFoundException();
+                  }
+                  return kv;
+                }
+              });
+      keyMetadataCache =
+          CacheBuilder.newBuilder().expireAfterAccess(keyTimeoutMillis,
+              TimeUnit.MILLISECONDS)
+              .build(new CacheLoader<String, Metadata>() {
+                @Override
+                public Metadata load(String key) throws Exception {
+                  Metadata meta = provider.getMetadata(key);
+                  if (meta == null) {
+                    throw new KeyNotFoundException();
+                  }
+                  return meta;
+                }
+              });
+      currentKeyCache =
+          CacheBuilder.newBuilder().expireAfterWrite(currKeyTimeoutMillis,
+          TimeUnit.MILLISECONDS)
+          .build(new CacheLoader<String, KeyVersion>() {
+            @Override
+            public KeyVersion load(String key) throws Exception {
+              KeyVersion kv = provider.getCurrentKey(key);
+              if (kv == null) {
+                throw new KeyNotFoundException();
+              }
+              return kv;
+            }
+          });
+    }
+  }
+
+  @SuppressWarnings("serial")
+  private static class KeyNotFoundException extends Exception { }
+
+  public CachingKeyProvider(KeyProvider keyProvider, long keyTimeoutMillis,
+      long currKeyTimeoutMillis) {
+    super(keyProvider, new CacheExtension(keyProvider, keyTimeoutMillis,
+        currKeyTimeoutMillis));
+  }
+
+  @Override
+  public KeyVersion getCurrentKey(String name) throws IOException {
+    try {
+      return getExtension().currentKeyCache.get(name);
+    } catch (ExecutionException ex) {
+      Throwable cause = ex.getCause();
+      if (cause instanceof KeyNotFoundException) {
+        return null;
+      } else if (cause instanceof IOException) {
+        throw (IOException) cause;
+      } else {
+        throw new IOException(cause);
+      }
+    }
+  }
+
+  @Override
+  public KeyVersion getKeyVersion(String versionName)
+      throws IOException {
+    try {
+      return getExtension().keyVersionCache.get(versionName);
+    } catch (ExecutionException ex) {
+      Throwable cause = ex.getCause();
+      if (cause instanceof KeyNotFoundException) {
+        return null;
+      } else if (cause instanceof IOException) {
+        throw (IOException) cause;
+      } else {
+        throw new IOException(cause);
+      }
+    }
+  }
+
+  @Override
+  public void deleteKey(String name) throws IOException {
+    getKeyProvider().deleteKey(name);
+    getExtension().currentKeyCache.invalidate(name);
+    getExtension().keyMetadataCache.invalidate(name);
+    // invalidating all key versions as we don't know
+    // which ones belonged to the deleted key
+    getExtension().keyVersionCache.invalidateAll();
+  }
+
+  @Override
+  public KeyVersion rollNewVersion(String name, byte[] material)
+      throws IOException {
+    KeyVersion key = getKeyProvider().rollNewVersion(name, material);
+    getExtension().currentKeyCache.invalidate(name);
+    getExtension().keyMetadataCache.invalidate(name);
+    return key;
+  }
+
+  @Override
+  public KeyVersion rollNewVersion(String name)
+      throws NoSuchAlgorithmException, IOException {
+    KeyVersion key = getKeyProvider().rollNewVersion(name);
+    getExtension().currentKeyCache.invalidate(name);
+    getExtension().keyMetadataCache.invalidate(name);
+    return key;
+  }
+
+  @Override
+  public Metadata getMetadata(String name) throws IOException {
+    try {
+      return getExtension().keyMetadataCache.get(name);
+    } catch (ExecutionException ex) {
+      Throwable cause = ex.getCause();
+      if (cause instanceof KeyNotFoundException) {
+        return null;
+      } else if (cause instanceof IOException) {
+        throw (IOException) cause;
+      } else {
+        throw new IOException(cause);
+      }
+    }
+  }
+
+}
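
A minimal sketch of wrapping an existing provider in this cache; the provider-path property name, keystore URI, and timeout values below are assumptions for illustration only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.CachingKeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class CachingKeyProviderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed provider-path property and store location.
    conf.set("hadoop.security.key.provider.path",
        "jceks://file/tmp/test.jceks");
    KeyProvider base = KeyProviderFactory.getProviders(conf).get(0);
    // Cache key versions and metadata for 10 minutes, and the "current"
    // key version for 30 seconds, before re-querying the wrapped provider.
    KeyProvider cached =
        new CachingKeyProvider(base, 10 * 60 * 1000, 30 * 1000);
    KeyProvider.KeyVersion kv = cached.getCurrentKey("mykey");
    System.out.println(kv == null ? "no such key" : kv.getVersionName());
  }
}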

+ 237 - 26
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java

@@ -26,7 +26,12 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.ProviderUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import javax.crypto.spec.SecretKeySpec;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.ObjectInputStream;
@@ -78,6 +83,9 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 @InterfaceAudience.Private
 public class JavaKeyStoreProvider extends KeyProvider {
   private static final String KEY_METADATA = "KeyMetadata";
+  private static Logger LOG =
+      LoggerFactory.getLogger(JavaKeyStoreProvider.class);
+
   public static final String SCHEME_NAME = "jceks";
 
   public static final String KEYSTORE_PASSWORD_FILE_KEY =
@@ -101,7 +109,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
 
   private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException {
     this.uri = uri;
-    path = unnestUri(uri);
+    path = ProviderUtils.unnestUri(uri);
     fs = path.getFileSystem(conf);
     // Get the password file from the conf, if not present from the user's
     // environment var
@@ -113,6 +121,10 @@ public class JavaKeyStoreProvider extends KeyProvider {
       if (pwFile != null) {
         ClassLoader cl = Thread.currentThread().getContextClassLoader();
         URL pwdFile = cl.getResource(pwFile);
+        if (pwdFile == null) {
+          // Provided Password file does not exist
+          throw new IOException("Password file does not exists");
+        }
         if (pwdFile != null) {
           InputStream is = pwdFile.openStream();
           try {
@@ -127,19 +139,25 @@ public class JavaKeyStoreProvider extends KeyProvider {
       password = KEYSTORE_PASSWORD_DEFAULT;
     }
     try {
+      Path oldPath = constructOldPath(path);
+      Path newPath = constructNewPath(path);
       keyStore = KeyStore.getInstance(SCHEME_NAME);
+      FsPermission perm = null;
       if (fs.exists(path)) {
-        // save off permissions in case we need to
-        // rewrite the keystore in flush()
-        FileStatus s = fs.getFileStatus(path);
-        permissions = s.getPermission();
-
-        keyStore.load(fs.open(path), password);
+        // flush did not proceed to completion
+        // _NEW should not exist
+        if (fs.exists(newPath)) {
+          throw new IOException(
+              String.format("Keystore not loaded due to some inconsistency "
+              + "('%s' and '%s' should not exist together)!!", path, newPath));
+        }
+        perm = tryLoadFromPath(path, oldPath);
       } else {
-        permissions = new FsPermission("700");
-        // required to create an empty keystore. *sigh*
-        keyStore.load(null, password);
+        perm = tryLoadIncompleteFlush(oldPath, newPath);
       }
+      // Need to save off permissions in case we need to
+      // rewrite the keystore in flush()
+      permissions = perm;
     } catch (KeyStoreException e) {
       throw new IOException("Can't create keystore", e);
     } catch (NoSuchAlgorithmException e) {
@@ -152,6 +170,136 @@ public class JavaKeyStoreProvider extends KeyProvider {
     writeLock = lock.writeLock();
   }
 
+  /**
+   * Try loading from the user-specified path; if that fails for a reason
+   * other than a bad/wrong password, load from the backup (_OLD) path.
+   * @param path Actual path to load from
+   * @param backupPath Backup path (_OLD)
+   * @return The permissions of the loaded file
+   * @throws NoSuchAlgorithmException
+   * @throws CertificateException
+   * @throws IOException
+   */
+  private FsPermission tryLoadFromPath(Path path, Path backupPath)
+      throws NoSuchAlgorithmException, CertificateException,
+      IOException {
+    FsPermission perm = null;
+    try {
+      perm = loadFromPath(path, password);
+      // Remove _OLD if exists
+      if (fs.exists(backupPath)) {
+        fs.delete(backupPath, true);
+      }
+      LOG.debug("KeyStore loaded successfully !!");
+    } catch (IOException ioe) {
+      // If the file is corrupted for some reason other than a
+      // wrong password, try the _OLD file if it exists
+      if (!isBadorWrongPassword(ioe)) {
+        perm = loadFromPath(backupPath, password);
+        // Rename CURRENT to CORRUPTED
+        renameOrFail(path, new Path(path.toString() + "_CORRUPTED_"
+            + System.currentTimeMillis()));
+        renameOrFail(backupPath, path);
+        LOG.debug(String.format(
+            "KeyStore loaded successfully from '%s' since '%s'"
+                + "was corrupted !!", backupPath, path));
+      } else {
+        throw ioe;
+      }
+    }
+    return perm;
+  }
+
+  /**
+   * The KeyStore might have gone down during a flush, in which case either the
+   * _NEW or _OLD file might exist. This method tries to load the KeyStore
+   * from one of these intermediate files.
+   * @param oldPath the _OLD file created during flush
+   * @param newPath the _NEW file created during flush
+   * @return The permissions of the loaded file
+   * @throws IOException
+   * @throws NoSuchAlgorithmException
+   * @throws CertificateException
+   */
+  private FsPermission tryLoadIncompleteFlush(Path oldPath, Path newPath)
+      throws IOException, NoSuchAlgorithmException, CertificateException {
+    FsPermission perm = null;
+    // Check if _NEW exists (in case flush had finished writing but not
+    // completed the re-naming)
+    if (fs.exists(newPath)) {
+      perm = loadAndReturnPerm(newPath, oldPath);
+    }
+    // Try loading from _OLD (an earlier flush might not have completed
+    // writing the keystore)
+    if ((perm == null) && fs.exists(oldPath)) {
+      perm = loadAndReturnPerm(oldPath, newPath);
+    }
+    // If not loaded yet,
+    // required to create an empty keystore. *sigh*
+    if (perm == null) {
+      keyStore.load(null, password);
+      LOG.debug("KeyStore initialized anew successfully !!");
+      perm = new FsPermission("700");
+    }
+    return perm;
+  }
+
+  private FsPermission loadAndReturnPerm(Path pathToLoad, Path pathToDelete)
+      throws NoSuchAlgorithmException, CertificateException,
+      IOException {
+    FsPermission perm = null;
+    try {
+      perm = loadFromPath(pathToLoad, password);
+      renameOrFail(pathToLoad, path);
+      LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
+          pathToLoad));
+      if (fs.exists(pathToDelete)) {
+        fs.delete(pathToDelete, true);
+      }
+    } catch (IOException e) {
+      // Check for password issue : don't want to trash file due
+      // to wrong password
+      if (isBadorWrongPassword(e)) {
+        throw e;
+      }
+    }
+    return perm;
+  }
+
+  private boolean isBadorWrongPassword(IOException ioe) {
+    // As per documentation this is supposed to be the way to figure
+    // if password was correct
+    if (ioe.getCause() instanceof UnrecoverableKeyException) {
+      return true;
+    }
+    // Unfortunately that doesn't seem to work..
+    // Workaround :
+    if ((ioe.getCause() == null)
+        && (ioe.getMessage() != null)
+        && ((ioe.getMessage().contains("Keystore was tampered")) || (ioe
+            .getMessage().contains("password was incorrect")))) {
+      return true;
+    }
+    return false;
+  }
+
+  private FsPermission loadFromPath(Path p, char[] password)
+      throws IOException, NoSuchAlgorithmException, CertificateException {
+    FileStatus s = fs.getFileStatus(p);
+    keyStore.load(fs.open(p), password);
+    return s.getPermission();
+  }
+
+  private Path constructNewPath(Path path) {
+    Path newPath = new Path(path.toString() + "_NEW");
+    return newPath;
+  }
+
+  private Path constructOldPath(Path path) {
+    Path oldPath = new Path(path.toString() + "_OLD");
+    return oldPath;
+  }
+
   @Override
   public KeyVersion getKeyVersion(String versionName) throws IOException {
     readLock.lock();
@@ -171,7 +319,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
       } catch (UnrecoverableKeyException e) {
         throw new IOException("Can't recover key " + key + " from " + path, e);
       }
-      return new KeyVersion(versionName, key.getEncoded());
+      return new KeyVersion(getBaseName(versionName), versionName, key.getEncoded());
     } finally {
       readLock.unlock();
     }
@@ -268,14 +416,14 @@ public class JavaKeyStoreProvider extends KeyProvider {
             e);
       }
       Metadata meta = new Metadata(options.getCipher(), options.getBitLength(),
-          options.getDescription(), new Date(), 1);
+          options.getDescription(), options.getAttributes(), new Date(), 1);
       if (options.getBitLength() != 8 * material.length) {
         throw new IOException("Wrong key length. Required " +
             options.getBitLength() + ", but got " + (8 * material.length));
       }
       cache.put(name, meta);
       String versionName = buildVersionName(name, 0);
-      return innerSetKeyVersion(versionName, material, meta.getCipher());
+      return innerSetKeyVersion(name, versionName, material, meta.getCipher());
     } finally {
       writeLock.unlock();
     }
@@ -314,7 +462,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
     }
   }
 
-  KeyVersion innerSetKeyVersion(String versionName, byte[] material,
+  KeyVersion innerSetKeyVersion(String name, String versionName, byte[] material,
                                 String cipher) throws IOException {
     try {
       keyStore.setKeyEntry(versionName, new SecretKeySpec(material, cipher),
@@ -324,7 +472,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
           e);
     }
     changed = true;
-    return new KeyVersion(versionName, material);
+    return new KeyVersion(name, versionName, material);
   }
 
   @Override
@@ -342,7 +490,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
       }
       int nextVersion = meta.addVersion();
       String versionName = buildVersionName(name, nextVersion);
-      return innerSetKeyVersion(versionName, material, meta.getCipher());
+      return innerSetKeyVersion(name, versionName, material, meta.getCipher());
     } finally {
       writeLock.unlock();
     }
@@ -350,11 +498,22 @@ public class JavaKeyStoreProvider extends KeyProvider {
 
   @Override
   public void flush() throws IOException {
+    Path newPath = constructNewPath(path);
+    Path oldPath = constructOldPath(path);
     writeLock.lock();
     try {
       if (!changed) {
         return;
       }
+      // Might exist if a backup has been restored etc.
+      if (fs.exists(newPath)) {
+        renameOrFail(newPath, new Path(newPath.toString()
+            + "_ORPHANED_" + System.currentTimeMillis()));
+      }
+      if (fs.exists(oldPath)) {
+        renameOrFail(oldPath, new Path(oldPath.toString()
+            + "_ORPHANED_" + System.currentTimeMillis()));
+      }
       // put all of the updates into the keystore
       for(Map.Entry<String, Metadata> entry: cache.entrySet()) {
         try {
@@ -364,25 +523,77 @@ public class JavaKeyStoreProvider extends KeyProvider {
           throw new IOException("Can't set metadata key " + entry.getKey(),e );
         }
       }
+
+      // Save old File first
+      boolean fileExisted = backupToOld(oldPath);
       // write out the keystore
-      FSDataOutputStream out = FileSystem.create(fs, path, permissions);
+      // Write to _NEW path first :
       try {
-        keyStore.store(out, password);
-      } catch (KeyStoreException e) {
-        throw new IOException("Can't store keystore " + this, e);
-      } catch (NoSuchAlgorithmException e) {
-        throw new IOException("No such algorithm storing keystore " + this, e);
-      } catch (CertificateException e) {
-        throw new IOException("Certificate exception storing keystore " + this,
-            e);
+        writeToNew(newPath);
+      } catch (IOException ioe) {
+        // rename _OLD back to current and rethrow the exception
+        revertFromOld(oldPath, fileExisted);
+        throw ioe;
       }
-      out.close();
+      // Rename _NEW to CURRENT and delete _OLD
+      cleanupNewAndOld(newPath, oldPath);
       changed = false;
     } finally {
       writeLock.unlock();
     }
   }
 
+  private void cleanupNewAndOld(Path newPath, Path oldPath) throws IOException {
+    // Rename _NEW to CURRENT
+    renameOrFail(newPath, path);
+    // Delete _OLD
+    if (fs.exists(oldPath)) {
+      fs.delete(oldPath, true);
+    }
+  }
+
+  private void writeToNew(Path newPath) throws IOException {
+    FSDataOutputStream out =
+        FileSystem.create(fs, newPath, permissions);
+    try {
+      keyStore.store(out, password);
+    } catch (KeyStoreException e) {
+      throw new IOException("Can't store keystore " + this, e);
+    } catch (NoSuchAlgorithmException e) {
+      throw new IOException(
+          "No such algorithm storing keystore " + this, e);
+    } catch (CertificateException e) {
+      throw new IOException(
+          "Certificate exception storing keystore " + this, e);
+    }
+    out.close();
+  }
+
+  private void revertFromOld(Path oldPath, boolean fileExisted)
+      throws IOException {
+    if (fileExisted) {
+      renameOrFail(oldPath, path);
+    }
+  }
+
+  private boolean backupToOld(Path oldPath)
+      throws IOException {
+    boolean fileExisted = false;
+    if (fs.exists(path)) {
+      renameOrFail(path, oldPath);
+      fileExisted = true;
+    }
+    return fileExisted;
+  }
+
+  private void renameOrFail(Path src, Path dest)
+      throws IOException {
+    if (!fs.rename(src, dest)) {
+      throw new IOException("Rename unsuccessful : "
+          + String.format("'%s' to '%s'", src, dest));
+    }
+  }
+
   @Override
   public String toString() {
     return uri.toString();
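
A hedged sketch of how the reworked flush path above is exercised from client code; the provider-path property name and keystore location are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class JceksAtomicFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed provider-path property and keystore location.
    conf.set("hadoop.security.key.provider.path",
        "jceks://file/tmp/keystore.jceks");
    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
    provider.createKey("mykey", new KeyProvider.Options(conf));
    // flush() now writes keystore.jceks_NEW first, keeps the previous file
    // around as keystore.jceks_OLD, and only then renames _NEW over the
    // original, so a crash mid-flush leaves a recoverable _NEW/_OLD behind.
    provider.flush();
  }
}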

+ 128 - 80
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java

@@ -23,11 +23,10 @@ import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
-import java.net.URI;
 import java.security.NoSuchAlgorithmException;
-import java.text.MessageFormat;
+import java.util.Collections;
 import java.util.Date;
-import java.util.LinkedHashMap;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -36,7 +35,6 @@ import com.google.gson.stream.JsonWriter;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
 
 import javax.crypto.KeyGenerator;
 
@@ -56,20 +54,26 @@ public abstract class KeyProvider {
   public static final String DEFAULT_CIPHER = "AES/CTR/NoPadding";
   public static final String DEFAULT_BITLENGTH_NAME =
       "hadoop.security.key.default.bitlength";
-  public static final int DEFAULT_BITLENGTH = 256;
+  public static final int DEFAULT_BITLENGTH = 128;
 
   /**
    * The combination of both the key version name and the key material.
    */
   public static class KeyVersion {
+    private final String name;
     private final String versionName;
     private final byte[] material;
 
-    protected KeyVersion(String versionName,
+    protected KeyVersion(String name, String versionName,
                          byte[] material) {
+      this.name = name;
       this.versionName = versionName;
       this.material = material;
     }
+    
+    public String getName() {
+      return name;
+    }
 
     public String getVersionName() {
       return versionName;
@@ -109,26 +113,47 @@ public abstract class KeyProvider {
     private final static String CREATED_FIELD = "created";
     private final static String DESCRIPTION_FIELD = "description";
     private final static String VERSIONS_FIELD = "versions";
+    private final static String ATTRIBUTES_FIELD = "attributes";
 
     private final String cipher;
     private final int bitLength;
     private final String description;
     private final Date created;
     private int versions;
+    private Map<String, String> attributes;
 
-    protected Metadata(String cipher, int bitLength,
-                       String description, Date created, int versions) {
+    protected Metadata(String cipher, int bitLength, String description,
+        Map<String, String> attributes, Date created, int versions) {
       this.cipher = cipher;
       this.bitLength = bitLength;
       this.description = description;
+      this.attributes = (attributes == null || attributes.isEmpty())
+                        ? null : attributes;
       this.created = created;
       this.versions = versions;
     }
 
     public String toString() {
-      return MessageFormat.format(
-          "cipher: {0}, length: {1} description: {2} created: {3} version: {4}",
-          cipher, bitLength, description, created, versions);
+      final StringBuilder metaSB = new StringBuilder();
+      metaSB.append("cipher: ").append(cipher).append(", ");
+      metaSB.append("length: ").append(bitLength).append(", ");
+      metaSB.append("description: ").append(description).append(", ");
+      metaSB.append("created: ").append(created).append(", ");
+      metaSB.append("version: ").append(versions).append(", ");
+      metaSB.append("attributes: ");
+      if ((attributes != null) && !attributes.isEmpty()) {
+        for (Map.Entry<String, String> attribute : attributes.entrySet()) {
+          metaSB.append("[");
+          metaSB.append(attribute.getKey());
+          metaSB.append("=");
+          metaSB.append(attribute.getValue());
+          metaSB.append("], ");
+        }
+        metaSB.deleteCharAt(metaSB.length() - 2);  // remove last ', '
+      } else {
+        metaSB.append("null");
+      }
+      return metaSB.toString();
     }
 
     public String getDescription() {
@@ -143,6 +168,11 @@ public abstract class KeyProvider {
       return cipher;
     }
 
+    @SuppressWarnings("unchecked")
+    public Map<String, String> getAttributes() {
+      return (attributes == null) ? Collections.EMPTY_MAP : attributes;
+    }
+
     /**
      * Get the algorithm from the cipher.
      * @return the algorithm name
@@ -176,22 +206,33 @@ public abstract class KeyProvider {
     protected byte[] serialize() throws IOException {
       ByteArrayOutputStream buffer = new ByteArrayOutputStream();
       JsonWriter writer = new JsonWriter(new OutputStreamWriter(buffer));
-      writer.beginObject();
-      if (cipher != null) {
-        writer.name(CIPHER_FIELD).value(cipher);
-      }
-      if (bitLength != 0) {
-        writer.name(BIT_LENGTH_FIELD).value(bitLength);
-      }
-      if (created != null) {
-        writer.name(CREATED_FIELD).value(created.getTime());
-      }
-      if (description != null) {
-        writer.name(DESCRIPTION_FIELD).value(description);
+      try {
+        writer.beginObject();
+        if (cipher != null) {
+          writer.name(CIPHER_FIELD).value(cipher);
+        }
+        if (bitLength != 0) {
+          writer.name(BIT_LENGTH_FIELD).value(bitLength);
+        }
+        if (created != null) {
+          writer.name(CREATED_FIELD).value(created.getTime());
+        }
+        if (description != null) {
+          writer.name(DESCRIPTION_FIELD).value(description);
+        }
+        if (attributes != null && attributes.size() > 0) {
+          writer.name(ATTRIBUTES_FIELD).beginObject();
+          for (Map.Entry<String, String> attribute : attributes.entrySet()) {
+            writer.name(attribute.getKey()).value(attribute.getValue());
+          }
+          writer.endObject();
+        }
+        writer.name(VERSIONS_FIELD).value(versions);
+        writer.endObject();
+        writer.flush();
+      } finally {
+        writer.close();
       }
-      writer.name(VERSIONS_FIELD).value(versions);
-      writer.endObject();
-      writer.flush();
       return buffer.toByteArray();
     }
 
@@ -206,28 +247,41 @@ public abstract class KeyProvider {
       Date created = null;
       int versions = 0;
       String description = null;
+      Map<String, String> attributes = null;
       JsonReader reader = new JsonReader(new InputStreamReader
-          (new ByteArrayInputStream(bytes)));
-      reader.beginObject();
-      while (reader.hasNext()) {
-        String field = reader.nextName();
-        if (CIPHER_FIELD.equals(field)) {
-          cipher = reader.nextString();
-        } else if (BIT_LENGTH_FIELD.equals(field)) {
-          bitLength = reader.nextInt();
-        } else if (CREATED_FIELD.equals(field)) {
-          created = new Date(reader.nextLong());
-        } else if (VERSIONS_FIELD.equals(field)) {
-          versions = reader.nextInt();
-        } else if (DESCRIPTION_FIELD.equals(field)) {
-          description = reader.nextString();
+        (new ByteArrayInputStream(bytes)));
+      try {
+        reader.beginObject();
+        while (reader.hasNext()) {
+          String field = reader.nextName();
+          if (CIPHER_FIELD.equals(field)) {
+            cipher = reader.nextString();
+          } else if (BIT_LENGTH_FIELD.equals(field)) {
+            bitLength = reader.nextInt();
+          } else if (CREATED_FIELD.equals(field)) {
+            created = new Date(reader.nextLong());
+          } else if (VERSIONS_FIELD.equals(field)) {
+            versions = reader.nextInt();
+          } else if (DESCRIPTION_FIELD.equals(field)) {
+            description = reader.nextString();
+          } else if (ATTRIBUTES_FIELD.equalsIgnoreCase(field)) {
+            reader.beginObject();
+            attributes = new HashMap<String, String>();
+            while (reader.hasNext()) {
+              attributes.put(reader.nextName(), reader.nextString());
+            }
+            reader.endObject();
+          }
         }
+        reader.endObject();
+      } finally {
+        reader.close();
       }
-      reader.endObject();
       this.cipher = cipher;
       this.bitLength = bitLength;
       this.created = created;
       this.description = description;
+      this.attributes = attributes;
       this.versions = versions;
     }
   }
@@ -239,6 +293,7 @@ public abstract class KeyProvider {
     private String cipher;
     private int bitLength;
     private String description;
+    private Map<String, String> attributes;
 
     public Options(Configuration conf) {
       cipher = conf.get(DEFAULT_CIPHER_NAME, DEFAULT_CIPHER);
@@ -260,6 +315,16 @@ public abstract class KeyProvider {
       return this;
     }
 
+    public Options setAttributes(Map<String, String> attributes) {
+      if (attributes != null) {
+        if (attributes.containsKey(null)) {
+          throw new IllegalArgumentException("attributes cannot have a NULL key");
+        }
+        this.attributes = new HashMap<String, String>(attributes);
+      }
+      return this;
+    }
+
     public String getCipher() {
       return cipher;
     }
@@ -271,6 +336,21 @@ public abstract class KeyProvider {
     public String getDescription() {
       return description;
     }
+
+    @SuppressWarnings("unchecked")
+    public Map<String, String> getAttributes() {
+      return (attributes == null) ? Collections.EMPTY_MAP : attributes;
+    }
+
+    @Override
+    public String toString() {
+      return "Options{" +
+          "cipher='" + cipher + '\'' +
+          ", bitLength=" + bitLength +
+          ", description='" + description + '\'' +
+          ", attributes=" + attributes +
+          '}';
+    }
   }
 
   /**
@@ -310,22 +390,17 @@ public abstract class KeyProvider {
    */
   public abstract List<String> getKeys() throws IOException;
 
-
   /**
-   * Get the key metadata for all keys.
-   *
-   * @return a Map with all the keys and their metadata
+   * Get key metadata in bulk.
+   * @param names the names of the keys to get
    * @throws IOException
    */
-  public Map<String, Metadata> getKeysMetadata() throws IOException {
-    Map<String, Metadata> keysMetadata = new LinkedHashMap<String, Metadata>();
-    for (String key : getKeys()) {
-      Metadata meta = getMetadata(key);
-      if (meta != null) {
-        keysMetadata.put(key, meta);
-      }
+  public Metadata[] getKeysMetadata(String... names) throws IOException {
+    Metadata[] result = new Metadata[names.length];
+    for (int i=0; i < names.length; ++i) {
+      result[i] = getMetadata(names[i]);
     }
-    return keysMetadata;
+    return result;
   }
 
   /**
@@ -487,33 +562,6 @@ public abstract class KeyProvider {
     return name + "@" + version;
   }
 
-  /**
-   * Convert a nested URI to decode the underlying path. The translation takes
-   * the authority and parses it into the underlying scheme and authority.
-   * For example, "myscheme://hdfs@nn/my/path" is converted to
-   * "hdfs://nn/my/path".
-   * @param nestedUri the URI from the nested URI
-   * @return the unnested path
-   */
-  public static Path unnestUri(URI nestedUri) {
-    String[] parts = nestedUri.getAuthority().split("@", 2);
-    StringBuilder result = new StringBuilder(parts[0]);
-    result.append("://");
-    if (parts.length == 2) {
-      result.append(parts[1]);
-    }
-    result.append(nestedUri.getPath());
-    if (nestedUri.getQuery() != null) {
-      result.append("?");
-      result.append(nestedUri.getQuery());
-    }
-    if (nestedUri.getFragment() != null) {
-      result.append("#");
-      result.append(nestedUri.getFragment());
-    }
-    return new Path(result.toString());
-  }
-
   /**
    * Find the provider with the given key.
    * @param providerList the list of providers
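
The attribute support and the bulk getKeysMetadata() signature introduced above can be exercised roughly as follows; the key name and attribute are made up for illustration:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;

public class KeyAttributesSketch {
  static void createWithAttributes(KeyProvider provider, Configuration conf)
      throws Exception {
    Map<String, String> attrs = new HashMap<String, String>();
    attrs.put("purpose", "example");            // hypothetical attribute
    KeyProvider.Options options = new KeyProvider.Options(conf)
        .setAttributes(attrs);
    provider.createKey("example-key", options);
    // getKeysMetadata() is now a bulk lookup over the names passed in.
    KeyProvider.Metadata[] meta = provider.getKeysMetadata("example-key");
    System.out.println(meta[0]);  // toString() now includes the attributes
  }
}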

+ 383 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java

@@ -0,0 +1,383 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.crypto.key;
+
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+import java.security.SecureRandom;
+
+import javax.crypto.Cipher;
+import javax.crypto.spec.IvParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A KeyProvider with Cryptographic Extensions specifically for generating
+ * and decrypting encrypted encryption keys.
+ * 
+ */
+@InterfaceAudience.Private
+public class KeyProviderCryptoExtension extends
+    KeyProviderExtension<KeyProviderCryptoExtension.CryptoExtension> {
+
+  /**
+   * Designates an encrypted encryption key, or EEK.
+   */
+  public static final String EEK = "EEK";
+  /**
+   * Designates a decrypted encrypted encryption key, that is, an encryption key
+   * (EK).
+   */
+  public static final String EK = "EK";
+
+  /**
+   * An encrypted encryption key (EEK) and related information. An EEK must be
+   * decrypted using the key's encryption key before it can be used.
+   */
+  public static class EncryptedKeyVersion {
+    private String encryptionKeyName;
+    private String encryptionKeyVersionName;
+    private byte[] encryptedKeyIv;
+    private KeyVersion encryptedKeyVersion;
+
+    /**
+     * Create a new EncryptedKeyVersion.
+     *
+     * @param keyName                  Name of the encryption key used to
+     *                                 encrypt the encrypted key.
+     * @param encryptionKeyVersionName Version name of the encryption key used
+     *                                 to encrypt the encrypted key.
+     * @param encryptedKeyIv           Initialization vector of the encrypted
+     *                                 key. The IV of the encryption key used to
+     *                                 encrypt the encrypted key is derived from
+     *                                 this IV.
+     * @param encryptedKeyVersion      The encrypted encryption key version.
+     */
+    protected EncryptedKeyVersion(String keyName,
+        String encryptionKeyVersionName, byte[] encryptedKeyIv,
+        KeyVersion encryptedKeyVersion) {
+      this.encryptionKeyName = keyName;
+      this.encryptionKeyVersionName = encryptionKeyVersionName;
+      this.encryptedKeyIv = encryptedKeyIv;
+      this.encryptedKeyVersion = encryptedKeyVersion;
+    }
+
+    /**
+     * Factory method to create a new EncryptedKeyVersion that can then be
+     * passed into {@link #decryptEncryptedKey}. Note that the fields of the
+     * returned EncryptedKeyVersion will only partially be populated; it is not
+     * necessarily suitable for operations besides decryption.
+     *
+     * @param encryptionKeyVersionName Version name of the encryption key used
+     *                                 to encrypt the encrypted key.
+     * @param encryptedKeyIv           Initialization vector of the encrypted
+     *                                 key. The IV of the encryption key used to
+     *                                 encrypt the encrypted key is derived from
+     *                                 this IV.
+     * @param encryptedKeyMaterial     Key material of the encrypted key.
+     * @return EncryptedKeyVersion suitable for decryption.
+     */
+    public static EncryptedKeyVersion createForDecryption(String
+        encryptionKeyVersionName, byte[] encryptedKeyIv,
+        byte[] encryptedKeyMaterial) {
+      KeyVersion encryptedKeyVersion = new KeyVersion(null, EEK,
+          encryptedKeyMaterial);
+      return new EncryptedKeyVersion(null, encryptionKeyVersionName,
+          encryptedKeyIv, encryptedKeyVersion);
+    }
+
+    /**
+     * @return Name of the encryption key used to encrypt the encrypted key.
+     */
+    public String getEncryptionKeyName() {
+      return encryptionKeyName;
+    }
+
+    /**
+     * @return Version name of the encryption key used to encrypt the encrypted
+     * key.
+     */
+    public String getEncryptionKeyVersionName() {
+      return encryptionKeyVersionName;
+    }
+
+    /**
+     * @return Initialization vector of the encrypted key. The IV of the
+     * encryption key used to encrypt the encrypted key is derived from this
+     * IV.
+     */
+    public byte[] getEncryptedKeyIv() {
+      return encryptedKeyIv;
+    }
+
+    /**
+     * @return The encrypted encryption key version.
+     */
+    public KeyVersion getEncryptedKeyVersion() {
+      return encryptedKeyVersion;
+    }
+
+    /**
+     * Derive the initialization vector (IV) for the encryption key from the IV
+     * of the encrypted key. This derived IV is used with the encryption key to
+     * decrypt the encrypted key.
+     * <p/>
+     * The alternative to this is using the same IV for both the encryption key
+     * and the encrypted key. Even a simple symmetric transformation like this
+     * improves security by avoiding IV re-use. IVs will also be fairly unique
+     * among different EEKs.
+     *
+     * @param encryptedKeyIV of the encrypted key (i.e. {@link
+     * #getEncryptedKeyIv()})
+     * @return IV for the encryption key
+     */
+    protected static byte[] deriveIV(byte[] encryptedKeyIV) {
+      byte[] rIv = new byte[encryptedKeyIV.length];
+      // Do a simple XOR transformation to flip all the bits
+      for (int i = 0; i < encryptedKeyIV.length; i++) {
+        rIv[i] = (byte) (encryptedKeyIV[i] ^ 0xff);
+      }
+      return rIv;
+    }
+  }
+
+  /**
+   * CryptoExtension is a type of Extension that exposes methods to generate
+   * EncryptedKeys and to decrypt the same.
+   */
+  public interface CryptoExtension extends KeyProviderExtension.Extension {
+
+    /**
+     * Calls to this method allow the underlying KeyProvider to warm up any
+     * implementation-specific caches used to store the encrypted keys.
+     * @param keyNames Array of key names
+     */
+    public void warmUpEncryptedKeys(String... keyNames)
+        throws IOException;
+
+    /**
+     * Generates a key material and encrypts it using the given key version name
+     * and initialization vector. The generated key material is of the same
+     * length as the <code>KeyVersion</code> material of the latest key version
+     * of the key and is encrypted using the same cipher.
+     * <p/>
+     * NOTE: The generated key is not stored by the <code>KeyProvider</code>
+     * 
+     * @param encryptionKeyName
+     *          The latest KeyVersion of this key's material will be encrypted.
+     * @return EncryptedKeyVersion with the generated key material, the version
+     *         name is 'EEK' (for Encrypted Encryption Key)
+     * @throws IOException
+     *           thrown if the key material could not be generated
+     * @throws GeneralSecurityException
+     *           thrown if the key material could not be encrypted because of a
+     *           cryptographic issue.
+     */
+    public EncryptedKeyVersion generateEncryptedKey(
+        String encryptionKeyName) throws IOException,
+        GeneralSecurityException;
+
+    /**
+     * Decrypts an encrypted byte[] key material using the given a key version
+     * name and initialization vector.
+     * 
+     * @param encryptedKeyVersion
+     *          contains keyVersionName and IV to decrypt the encrypted key
+     *          material
+     * @return a KeyVersion with the decrypted key material, the version name is
+     *         'EK' (For Encryption Key)
+     * @throws IOException
+     *           thrown if the key material could not be decrypted
+     * @throws GeneralSecurityException
+     *           thrown if the key material could not be decrypted because of a
+     *           cryptographic issue.
+     */
+    public KeyVersion decryptEncryptedKey(
+        EncryptedKeyVersion encryptedKeyVersion) throws IOException,
+        GeneralSecurityException;
+  }
+
+  private static class DefaultCryptoExtension implements CryptoExtension {
+
+    private final KeyProvider keyProvider;
+    private static final ThreadLocal<SecureRandom> RANDOM = 
+        new ThreadLocal<SecureRandom>() {
+      @Override
+      protected SecureRandom initialValue() {
+        return new SecureRandom();
+      }
+    };
+
+    private DefaultCryptoExtension(KeyProvider keyProvider) {
+      this.keyProvider = keyProvider;
+    }
+
+    @Override
+    public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
+        throws IOException, GeneralSecurityException {
+      // Fetch the encryption key
+      KeyVersion encryptionKey = keyProvider.getCurrentKey(encryptionKeyName);
+      Preconditions.checkNotNull(encryptionKey,
+          "No KeyVersion exists for key '%s' ", encryptionKeyName);
+      // Generate random bytes for new key and IV
+      Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
+      final byte[] newKey = new byte[encryptionKey.getMaterial().length];
+      RANDOM.get().nextBytes(newKey);
+      final byte[] iv = new byte[cipher.getBlockSize()];
+      RANDOM.get().nextBytes(iv);
+      // Encryption key IV is derived from new key's IV
+      final byte[] encryptionIV = EncryptedKeyVersion.deriveIV(iv);
+      // Encrypt the new key
+      cipher.init(Cipher.ENCRYPT_MODE,
+          new SecretKeySpec(encryptionKey.getMaterial(), "AES"),
+          new IvParameterSpec(encryptionIV));
+      final byte[] encryptedKey = cipher.doFinal(newKey);
+      return new EncryptedKeyVersion(encryptionKeyName,
+          encryptionKey.getVersionName(), iv,
+          new KeyVersion(encryptionKey.getName(), EEK, encryptedKey));
+    }
+
+    @Override
+    public KeyVersion decryptEncryptedKey(
+        EncryptedKeyVersion encryptedKeyVersion) throws IOException,
+        GeneralSecurityException {
+      // Fetch the encryption key material
+      final String encryptionKeyVersionName =
+          encryptedKeyVersion.getEncryptionKeyVersionName();
+      final KeyVersion encryptionKey =
+          keyProvider.getKeyVersion(encryptionKeyVersionName);
+      Preconditions.checkNotNull(encryptionKey,
+          "KeyVersion name '%s' does not exist", encryptionKeyVersionName);
+      Preconditions.checkArgument(
+              encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
+                    .equals(KeyProviderCryptoExtension.EEK),
+                "encryptedKey version name must be '%s', is '%s'",
+                KeyProviderCryptoExtension.EEK,
+                encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
+            );
+      final byte[] encryptionKeyMaterial = encryptionKey.getMaterial();
+      // Encryption key IV is determined from encrypted key's IV
+      final byte[] encryptionIV =
+          EncryptedKeyVersion.deriveIV(encryptedKeyVersion.getEncryptedKeyIv());
+      // Init the cipher with encryption key parameters
+      Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
+      cipher.init(Cipher.DECRYPT_MODE,
+          new SecretKeySpec(encryptionKeyMaterial, "AES"),
+          new IvParameterSpec(encryptionIV));
+      // Decrypt the encrypted key
+      final KeyVersion encryptedKV =
+          encryptedKeyVersion.getEncryptedKeyVersion();
+      final byte[] decryptedKey = cipher.doFinal(encryptedKV.getMaterial());
+      return new KeyVersion(encryptionKey.getName(), EK, decryptedKey);
+    }
+
+    @Override
+    public void warmUpEncryptedKeys(String... keyNames)
+        throws IOException {
+      // NO-OP since the default version does not cache any keys
+    }
+
+  }
+
+  /**
+   * This constructor is to be used by sub classes that provide
+   * delegating/proxying functionality to the {@link KeyProviderCryptoExtension}
+   * @param keyProvider
+   * @param extension
+   */
+  protected KeyProviderCryptoExtension(KeyProvider keyProvider,
+      CryptoExtension extension) {
+    super(keyProvider, extension);
+  }
+
+  /**
+   * Notifies the underlying CryptoExtension implementation to warm up any
+   * implementation-specific caches for the specified keys.
+   * @param keyNames Array of key names
+   */
+  public void warmUpEncryptedKeys(String... keyNames)
+      throws IOException {
+    getExtension().warmUpEncryptedKeys(keyNames);
+  }
+
+  /**
+   * Generates a key material and encrypts it using the given key version name
+   * and initialization vector. The generated key material is of the same
+   * length as the <code>KeyVersion</code> material and is encrypted using the
+   * same cipher.
+   * <p/>
+   * NOTE: The generated key is not stored by the <code>KeyProvider</code>
+   *
+   * @param encryptionKeyName The latest KeyVersion of this key's material will
+   * be encrypted.
+   * @return EncryptedKeyVersion with the generated key material, the version
+   * name is 'EEK' (for Encrypted Encryption Key)
+   * @throws IOException thrown if the key material could not be generated
+   * @throws GeneralSecurityException thrown if the key material could not be 
+   * encrypted because of a cryptographic issue.
+   */
+  public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
+      throws IOException,
+          GeneralSecurityException {
+    return getExtension().generateEncryptedKey(encryptionKeyName);
+  }
+
+  /**
+   * Decrypts an encrypted byte[] key material using the given a key version
+   * name and initialization vector.
+   *
+   * @param encryptedKey contains keyVersionName and IV to decrypt the encrypted 
+   * key material
+   * @return a KeyVersion with the decrypted key material, the version name is
+   * 'EK' (For Encryption Key)
+   * @throws IOException thrown if the key material could not be decrypted
+   * @throws GeneralSecurityException thrown if the key material could not be 
+   * decrypted because of a cryptographic issue.
+   */
+  public KeyVersion decryptEncryptedKey(EncryptedKeyVersion encryptedKey) 
+      throws IOException, GeneralSecurityException {
+    return getExtension().decryptEncryptedKey(encryptedKey);
+  }
+
+  /**
+   * Creates a <code>KeyProviderCryptoExtension</code> using a given 
+   * {@link KeyProvider}.
+   * <p/>
+   * If the given <code>KeyProvider</code> implements the 
+   * {@link CryptoExtension} interface the <code>KeyProvider</code> itself
+   * will provide the extension functionality, otherwise a default extension
+   * implementation will be used.
+   * 
+   * @param keyProvider <code>KeyProvider</code> to use to create the 
+   * <code>KeyProviderCryptoExtension</code> extension.
+   * @return a <code>KeyProviderCryptoExtension</code> instance using the
+   * given <code>KeyProvider</code>.
+   */
+  public static KeyProviderCryptoExtension createKeyProviderCryptoExtension(
+      KeyProvider keyProvider) {
+    CryptoExtension cryptoExtension = (keyProvider instanceof CryptoExtension)
+                         ? (CryptoExtension) keyProvider
+                         : new DefaultCryptoExtension(keyProvider);
+    return new KeyProviderCryptoExtension(keyProvider, cryptoExtension);
+  }  
+
+}
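
A hedged end-to-end sketch of the EEK flow this class enables; the key name "ezkey" is arbitrary and the provider is whatever the caller already has:

import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;

public class EekRoundTripSketch {
  static void roundTrip(KeyProvider provider) throws Exception {
    KeyProviderCryptoExtension kpce =
        KeyProviderCryptoExtension.createKeyProviderCryptoExtension(provider);
    // Generate new key material, encrypted with the current version of "ezkey";
    // the generated key is never persisted by the provider.
    EncryptedKeyVersion eek = kpce.generateEncryptedKey("ezkey");
    // Decrypt it again; the result is an "EK" KeyVersion holding the material.
    KeyProvider.KeyVersion ek = kpce.decryptEncryptedKey(eek);
    System.out.println(ek.getMaterial().length + " bytes of decrypted material");
  }
}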

+ 115 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java

@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key;
+
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.token.Token;
+
+import java.io.IOException;
+
+/**
+ * A KeyProvider extension with the ability to add a renewer's Delegation 
+ * Tokens to the provided Credentials.
+ */
+public class KeyProviderDelegationTokenExtension extends
+    KeyProviderExtension
+    <KeyProviderDelegationTokenExtension.DelegationTokenExtension> {
+  
+  private static DelegationTokenExtension DEFAULT_EXTENSION = 
+      new DefaultDelegationTokenExtension();
+
+  /**
+   * DelegationTokenExtension is a type of Extension that exposes the methods
+   * needed to work with Delegation Tokens.
+   */  
+  public interface DelegationTokenExtension extends 
+    KeyProviderExtension.Extension {
+    
+    /**
+     * The implementer of this class will take a renewer and add all
+     * delegation tokens associated with the renewer to the
+     * <code>Credentials</code> object if they are not already present.
+     * @param renewer the user allowed to renew the delegation tokens
+     * @param credentials cache in which to add new delegation tokens
+     * @return list of new delegation tokens
+     * @throws IOException thrown if an IO error occurs.
+     */
+    public Token<?>[] addDelegationTokens(final String renewer, 
+        Credentials credentials) throws IOException;
+  }
+  
+  /**
+   * Default implementation of {@link DelegationTokenExtension} that
+   * implements the method as a no-op.
+   */
+  private static class DefaultDelegationTokenExtension implements 
+    DelegationTokenExtension {    
+    
+    @Override
+    public Token<?>[] addDelegationTokens(String renewer,
+        Credentials credentials) {
+      return null;
+    }
+    
+  }
+
+  private KeyProviderDelegationTokenExtension(KeyProvider keyProvider,
+      DelegationTokenExtension extensions) {
+    super(keyProvider, extensions);
+  }
+  
+  /**
+   * Passes the renewer and Credentials object to the underlying 
+   * {@link DelegationTokenExtension} 
+   * @param renewer the user allowed to renew the delegation tokens
+   * @param credentials cache in which to add new delegation tokens
+   * @return list of new delegation tokens
+   * @throws IOException thrown if an IO error occurs.
+   */
+  public Token<?>[] addDelegationTokens(final String renewer, 
+      Credentials credentials) throws IOException {
+    return getExtension().addDelegationTokens(renewer, credentials);
+  }
+  
+  /**
+   * Creates a <code>KeyProviderDelegationTokenExtension</code> using a given 
+   * {@link KeyProvider}.
+   * <p/>
+   * If the given <code>KeyProvider</code> implements the 
+   * {@link DelegationTokenExtension} interface the <code>KeyProvider</code> 
+   * itself will provide the extension functionality, otherwise a default 
+   * extension implementation will be used.
+   * 
+   * @param keyProvider <code>KeyProvider</code> to use to create the 
+   * <code>KeyProviderDelegationTokenExtension</code> extension.
+   * @return a <code>KeyProviderDelegationTokenExtension</code> instance 
+   * using the given <code>KeyProvider</code>.
+   */  
+  public static KeyProviderDelegationTokenExtension
+      createKeyProviderDelegationTokenExtension(KeyProvider keyProvider) {
+
+    DelegationTokenExtension delTokExtension =
+        (keyProvider instanceof DelegationTokenExtension) ?
+            (DelegationTokenExtension) keyProvider :
+            DEFAULT_EXTENSION;
+    return new KeyProviderDelegationTokenExtension(
+        keyProvider, delTokExtension);
+
+  }
+
+}
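
A small sketch of the delegation-token extension in use; the renewer name is illustrative:

import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class DelegationTokenSketch {
  static void fetchTokens(KeyProvider provider) throws Exception {
    KeyProviderDelegationTokenExtension ext =
        KeyProviderDelegationTokenExtension
            .createKeyProviderDelegationTokenExtension(provider);
    Credentials creds = new Credentials();
    // Providers without delegation-token support fall back to the default
    // no-op extension and simply return null here.
    Token<?>[] newTokens = ext.addDelegationTokens("yarn", creds);
    System.out.println(newTokens == null ? 0 : newTokens.length);
  }
}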

+ 128 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderExtension.java

@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.crypto.key;
+
+import java.io.IOException;
+import java.security.NoSuchAlgorithmException;
+import java.util.List;
+
+/**
+ * This is a utility class used to extend the functionality of KeyProvider; it
+ * takes a KeyProvider and an Extension. It implements all the required methods
+ * of the KeyProvider by delegating them to the provided KeyProvider.
+ */
+public abstract class KeyProviderExtension
+<E extends KeyProviderExtension.Extension> extends KeyProvider {
+
+  /**
+   * A marker interface for the KeyProviderExtension subclass to implement.
+   */
+  public static interface Extension {
+  }
+
+  private KeyProvider keyProvider;
+  private E extension;
+
+  public KeyProviderExtension(KeyProvider keyProvider, E extensions) {
+    this.keyProvider = keyProvider;
+    this.extension = extensions;
+  }
+  
+  protected E getExtension() {
+    return extension;
+  }
+  
+  protected KeyProvider getKeyProvider() {
+    return keyProvider;
+  }
+
+  @Override
+  public boolean isTransient() {
+    return keyProvider.isTransient();
+  }
+
+  @Override
+  public Metadata[] getKeysMetadata(String... names) throws IOException {
+    return keyProvider.getKeysMetadata(names);
+  }
+
+  @Override
+  public KeyVersion getCurrentKey(String name) throws IOException {
+    return keyProvider.getCurrentKey(name);
+  }
+
+  @Override
+  public KeyVersion createKey(String name, Options options)
+      throws NoSuchAlgorithmException, IOException {
+    return keyProvider.createKey(name, options);
+  }
+
+  @Override
+  public KeyVersion rollNewVersion(String name)
+      throws NoSuchAlgorithmException, IOException {
+    return keyProvider.rollNewVersion(name);
+  }
+
+  @Override
+  public KeyVersion getKeyVersion(String versionName) throws IOException {
+    return keyProvider.getKeyVersion(versionName);
+  }
+
+  @Override
+  public List<String> getKeys() throws IOException {
+    return keyProvider.getKeys();
+  }
+
+  @Override
+  public List<KeyVersion> getKeyVersions(String name) throws IOException {
+    return keyProvider.getKeyVersions(name);
+  }
+
+  @Override
+  public Metadata getMetadata(String name) throws IOException {
+    return keyProvider.getMetadata(name);
+  }
+
+  @Override
+  public KeyVersion createKey(String name, byte[] material, Options options)
+      throws IOException {
+    return keyProvider.createKey(name, material, options);
+  }
+
+  @Override
+  public void deleteKey(String name) throws IOException {
+    keyProvider.deleteKey(name);
+  }
+
+  @Override
+  public KeyVersion rollNewVersion(String name, byte[] material)
+      throws IOException {
+    return keyProvider.rollNewVersion(name, material);
+  }
+
+  @Override
+  public void flush() throws IOException {
+    keyProvider.flush();
+  }
+
+  @Override
+  public String toString() {
+    return getClass().getSimpleName() + ": " + keyProvider.toString();
+  }
+}
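
To illustrate the delegation pattern above, a hypothetical extension that adds one new operation while inheriting all the delegating KeyProvider methods; AuditingExtension and logAccess() are invented names, not part of this patch:

// Hypothetical sketch of a KeyProviderExtension subclass.
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderExtension;

class AuditingKeyProviderExtension extends
    KeyProviderExtension<AuditingKeyProviderExtension.AuditingExtension> {

  /** The extra capability layered on top of the plain KeyProvider API. */
  interface AuditingExtension extends KeyProviderExtension.Extension {
    void logAccess(String keyName);
  }

  AuditingKeyProviderExtension(KeyProvider provider, AuditingExtension ext) {
    super(provider, ext);
  }

  /**
   * New method exposed by the extension; every KeyProvider method is
   * inherited from KeyProviderExtension and delegates to the wrapped provider.
   */
  public void logAccess(String keyName) {
    getExtension().logAccess(keyName);
  }
}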

+ 10 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderFactory.java

@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
 import java.util.ServiceLoader;
 
@@ -47,6 +48,15 @@ public abstract class KeyProviderFactory {
   private static final ServiceLoader<KeyProviderFactory> serviceLoader =
       ServiceLoader.load(KeyProviderFactory.class);
 
+  // Iterate through the serviceLoader to avoid lazy loading.
+  // Lazy loading would require synchronization in concurrent use cases.
+  static {
+    Iterator<KeyProviderFactory> iterServices = serviceLoader.iterator();
+    while (iterServices.hasNext()) {
+      iterServices.next();
+    }
+  }
+  
   public static List<KeyProvider> getProviders(Configuration conf
                                                ) throws IOException {
     List<KeyProvider> result = new ArrayList<KeyProvider>();
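
The static block above walks the ServiceLoader once at class-initialization time, so all factories are instantiated under the JVM's class-initialization lock rather than lazily from concurrent callers. A generic sketch of the same idiom, with a hypothetical MyFactory service type:

// Illustration only; MyFactory is a made-up service interface.
import java.util.ServiceLoader;

final class EagerServiceLoading {

  interface MyFactory { }

  private static final ServiceLoader<MyFactory> LOADER =
      ServiceLoader.load(MyFactory.class);

  // Runs once under the class-initialization lock; touching every element
  // here forces instantiation and avoids unsynchronized lazy loading later.
  static {
    for (MyFactory factory : LOADER) {
      // no-op: iteration alone instantiates the implementations
    }
  }

  private EagerServiceLoading() { }
}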

+ 161 - 108
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java

@@ -22,11 +22,10 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.security.InvalidParameterException;
 import java.security.NoSuchAlgorithmException;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import javax.crypto.KeyGenerator;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.crypto.key.KeyProvider.Metadata;
@@ -39,9 +38,9 @@ import org.apache.hadoop.util.ToolRunner;
  */
 public class KeyShell extends Configured implements Tool {
   final static private String USAGE_PREFIX = "Usage: hadoop key " +
-  		"[generic options]\n";
+      "[generic options]\n";
   final static private String COMMANDS =
-      "   [--help]\n" +
+      "   [-help]\n" +
       "   [" + CreateCommand.USAGE + "]\n" +
       "   [" + RollCommand.USAGE + "]\n" +
       "   [" + DeleteCommand.USAGE + "]\n" +
@@ -58,6 +57,16 @@ public class KeyShell extends Configured implements Tool {
 
   private boolean userSuppliedProvider = false;
 
+  /**
+   * Primary entry point for the KeyShell; called via main().
+   *
+   * @param args Command line arguments.
+   * @return 0 on success and 1 on failure.  This value is passed back to
+   * the unix shell, so we must follow shell return code conventions:
+   * the return code is an unsigned character, and 0 means success, and
+   * small positive integers mean failure.
+   * @throws Exception
+   */
   @Override
   public int run(String[] args) throws Exception {
     int exitCode = 0;
@@ -69,11 +78,11 @@ public class KeyShell extends Configured implements Tool {
       if (command.validate()) {
           command.execute();
       } else {
-        exitCode = -1;
+        exitCode = 1;
       }
     } catch (Exception e) {
       e.printStackTrace(err);
-      return -1;
+      return 1;
     }
     return exitCode;
   }
@@ -81,61 +90,106 @@ public class KeyShell extends Configured implements Tool {
   /**
    * Parse the command line arguments and initialize the data
    * <pre>
-   * % hadoop key create keyName [--size size] [--cipher algorithm]
-   *    [--provider providerPath]
-   * % hadoop key roll keyName [--provider providerPath]
+   * % hadoop key create keyName [-size size] [-cipher algorithm]
+   *    [-provider providerPath]
+   * % hadoop key roll keyName [-provider providerPath]
    * % hadoop key list [-provider providerPath]
-   * % hadoop key delete keyName [--provider providerPath] [-i]
+   * % hadoop key delete keyName [-provider providerPath] [-i]
    * </pre>
-   * @param args
-   * @return
+   * @param args Command line arguments.
+   * @return 0 on success, 1 on failure.
    * @throws IOException
    */
   private int init(String[] args) throws IOException {
+    final Options options = KeyProvider.options(getConf());
+    final Map<String, String> attributes = new HashMap<String, String>();
+
     for (int i = 0; i < args.length; i++) { // parse command line
+      boolean moreTokens = (i < args.length - 1);
       if (args[i].equals("create")) {
-        String keyName = args[++i];
-        command = new CreateCommand(keyName);
-        if (keyName.equals("--help")) {
+        String keyName = "-help";
+        if (moreTokens) {
+          keyName = args[++i];
+        }
+
+        command = new CreateCommand(keyName, options);
+        if ("-help".equals(keyName)) {
           printKeyShellUsage();
-          return -1;
+          return 1;
         }
       } else if (args[i].equals("delete")) {
-        String keyName = args[++i];
+        String keyName = "-help";
+        if (moreTokens) {
+          keyName = args[++i];
+        }
+
         command = new DeleteCommand(keyName);
-        if (keyName.equals("--help")) {
+        if ("-help".equals(keyName)) {
           printKeyShellUsage();
-          return -1;
+          return 1;
         }
       } else if (args[i].equals("roll")) {
-        String keyName = args[++i];
+        String keyName = "-help";
+        if (moreTokens) {
+          keyName = args[++i];
+        }
+
         command = new RollCommand(keyName);
-        if (keyName.equals("--help")) {
+        if ("-help".equals(keyName)) {
           printKeyShellUsage();
-          return -1;
+          return 1;
         }
-      } else if (args[i].equals("list")) {
+      } else if ("list".equals(args[i])) {
         command = new ListCommand();
-      } else if (args[i].equals("--size")) {
-        getConf().set(KeyProvider.DEFAULT_BITLENGTH_NAME, args[++i]);
-      } else if (args[i].equals("--cipher")) {
-        getConf().set(KeyProvider.DEFAULT_CIPHER_NAME, args[++i]);
-      } else if (args[i].equals("--provider")) {
+      } else if ("-size".equals(args[i]) && moreTokens) {
+        options.setBitLength(Integer.parseInt(args[++i]));
+      } else if ("-cipher".equals(args[i]) && moreTokens) {
+        options.setCipher(args[++i]);
+      } else if ("-description".equals(args[i]) && moreTokens) {
+        options.setDescription(args[++i]);
+      } else if ("-attr".equals(args[i]) && moreTokens) {
+        final String attrval[] = args[++i].split("=", 2);
+        final String attr = attrval[0].trim();
+        final String val = attrval[1].trim();
+        if (attr.isEmpty() || val.isEmpty()) {
+          out.println("\nAttributes must be in attribute=value form, " +
+                  "or quoted\nlike \"attribute = value\"\n");
+          printKeyShellUsage();
+          return 1;
+        }
+        if (attributes.containsKey(attr)) {
+          out.println("\nEach attribute must correspond to only one value:\n" +
+                  "attribute \"" + attr + "\" was repeated\n");
+          printKeyShellUsage();
+          return 1;
+        }
+        attributes.put(attr, val);
+      } else if ("-provider".equals(args[i]) && moreTokens) {
         userSuppliedProvider = true;
         getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]);
-      } else if (args[i].equals("--metadata")) {
+      } else if ("-metadata".equals(args[i])) {
         getConf().setBoolean(LIST_METADATA, true);
-      } else if (args[i].equals("-i") || (args[i].equals("--interactive"))) {
+      } else if ("-i".equals(args[i]) || ("-interactive".equals(args[i]))) {
         interactive = true;
-      } else if (args[i].equals("--help")) {
+      } else if ("-help".equals(args[i])) {
         printKeyShellUsage();
-        return -1;
+        return 1;
       } else {
         printKeyShellUsage();
         ToolRunner.printGenericCommandUsage(System.err);
-        return -1;
+        return 1;
       }
     }
+
+    if (command == null) {
+      printKeyShellUsage();
+      return 1;
+    }
+
+    if (!attributes.isEmpty()) {
+      options.setAttributes(attributes);
+    }
+
     return 0;
   }
 
@@ -143,8 +197,7 @@ public class KeyShell extends Configured implements Tool {
     out.println(USAGE_PREFIX + COMMANDS);
     if (command != null) {
       out.println(command.getUsage());
-    }
-    else {
+    } else {
       out.println("=========================================================" +
       		"======");
       out.println(CreateCommand.USAGE + ":\n\n" + CreateCommand.DESC);
@@ -174,8 +227,7 @@ public class KeyShell extends Configured implements Tool {
         providers = KeyProviderFactory.getProviders(getConf());
         if (userSuppliedProvider) {
           provider = providers.get(0);
-        }
-        else {
+        } else {
           for (KeyProvider p : providers) {
             if (!p.isTransient()) {
               provider = p;
@@ -190,7 +242,7 @@ public class KeyShell extends Configured implements Tool {
     }
 
     protected void printProviderWritten() {
-        out.println(provider.getClass().getName() + " has been updated.");
+        out.println(provider + " has been updated.");
     }
 
     protected void warnIfTransientProvider() {
@@ -206,12 +258,12 @@ public class KeyShell extends Configured implements Tool {
 
   private class ListCommand extends Command {
     public static final String USAGE =
-        "list [--provider] [--metadata] [--help]";
+        "list [-provider <provider>] [-metadata] [-help]";
     public static final String DESC =
-        "The list subcommand displays the keynames contained within \n" +
-        "a particular provider - as configured in core-site.xml or " +
-        "indicated\nthrough the --provider argument.\n" +
-        "If the --metadata option is used, the keys metadata will be printed";
+        "The list subcommand displays the keynames contained within\n" +
+        "a particular provider as configured in core-site.xml or\n" +
+        "specified with the -provider argument. -metadata displays\n" +
+        "the metadata.";
 
     private boolean metadata = false;
 
@@ -220,9 +272,9 @@ public class KeyShell extends Configured implements Tool {
       provider = getKeyProvider();
       if (provider == null) {
         out.println("There are no non-transient KeyProviders configured.\n"
-            + "Consider using the --provider option to indicate the provider\n"
-            + "to use. If you want to list a transient provider then you\n"
-            + "you MUST use the --provider argument.");
+          + "Use the -provider option to specify a provider. If you\n"
+          + "want to list a transient provider then you must use the\n"
+          + "-provider argument.");
         rc = false;
       }
       metadata = getConf().getBoolean(LIST_METADATA, false);
@@ -230,22 +282,22 @@ public class KeyShell extends Configured implements Tool {
     }
 
     public void execute() throws IOException {
-      List<String> keys;
       try {
-        out.println("Listing keys for KeyProvider: " + provider.toString());
+        final List<String> keys = provider.getKeys();
+        out.println("Listing keys for KeyProvider: " + provider);
         if (metadata) {
-          Map<String, Metadata> keysMeta = provider.getKeysMetadata();
-          for (Map.Entry<String, Metadata> entry : keysMeta.entrySet()) {
-            out.println(entry.getKey() + " : " + entry.getValue());
+          final Metadata[] meta =
+            provider.getKeysMetadata(keys.toArray(new String[keys.size()]));
+          for (int i = 0; i < meta.length; ++i) {
+            out.println(keys.get(i) + " : " + meta[i]);
           }
         } else {
-          keys = provider.getKeys();
           for (String keyName : keys) {
             out.println(keyName);
           }
         }
       } catch (IOException e) {
-        out.println("Cannot list keys for KeyProvider: " + provider.toString()
+        out.println("Cannot list keys for KeyProvider: " + provider
             + ": " + e.getMessage());
         throw e;
       }
@@ -258,11 +310,10 @@ public class KeyShell extends Configured implements Tool {
   }
 
   private class RollCommand extends Command {
-    public static final String USAGE = "roll <keyname> [--provider] [--help]";
+    public static final String USAGE = "roll <keyname> [-provider <provider>] [-help]";
     public static final String DESC =
-        "The roll subcommand creates a new version of the key specified\n" +
-        "through the <keyname> argument within the provider indicated using\n" +
-        "the --provider argument";
+      "The roll subcommand creates a new version for the specified key\n" +
+      "within the provider indicated using the -provider argument\n";
 
     String keyName = null;
 
@@ -274,15 +325,14 @@ public class KeyShell extends Configured implements Tool {
       boolean rc = true;
       provider = getKeyProvider();
       if (provider == null) {
-        out.println("There are no valid KeyProviders configured.\n"
-            + "Key will not be rolled.\n"
-            + "Consider using the --provider option to indicate the provider"
-            + " to use.");
+        out.println("There are no valid KeyProviders configured. The key\n" +
+          "has not been rolled. Use the -provider option to specify\n" +
+          "a provider.");
         rc = false;
       }
       if (keyName == null) {
-        out.println("There is no keyName specified. Please provide the" +
-            "mandatory <keyname>. See the usage description with --help.");
+        out.println("Please provide a <keyname>.\n" +
+          "See the usage description by using -help.");
         rc = false;
       }
       return rc;
@@ -290,10 +340,9 @@ public class KeyShell extends Configured implements Tool {
 
     public void execute() throws NoSuchAlgorithmException, IOException {
       try {
-        Metadata md = provider.getMetadata(keyName);
         warnIfTransientProvider();
         out.println("Rolling key version from KeyProvider: "
-            + provider.toString() + " for key name: " + keyName);
+            + provider + "\n  for key name: " + keyName);
         try {
           provider.rollNewVersion(keyName);
           out.println(keyName + " has been successfully rolled.");
@@ -301,12 +350,12 @@ public class KeyShell extends Configured implements Tool {
           printProviderWritten();
         } catch (NoSuchAlgorithmException e) {
           out.println("Cannot roll key: " + keyName + " within KeyProvider: "
-              + provider.toString());
+              + provider);
           throw e;
         }
       } catch (IOException e1) {
         out.println("Cannot roll key: " + keyName + " within KeyProvider: "
-            + provider.toString());
+            + provider);
         throw e1;
       }
     }
@@ -318,11 +367,11 @@ public class KeyShell extends Configured implements Tool {
   }
 
   private class DeleteCommand extends Command {
-    public static final String USAGE = "delete <keyname> [--provider] [--help]";
+    public static final String USAGE = "delete <keyname> [-provider <provider>] [-help]";
     public static final String DESC =
-        "The delete subcommand deletes all of the versions of the key\n" +
-        "specified as the <keyname> argument from within the provider\n" +
-        "indicated through the --provider argument";
+        "The delete subcommand deletes all versions of the key\n" +
+        "specified by the <keyname> argument from within the\n" +
+        "provider specified -provider.";
 
     String keyName = null;
     boolean cont = true;
@@ -335,23 +384,21 @@ public class KeyShell extends Configured implements Tool {
     public boolean validate() {
       provider = getKeyProvider();
       if (provider == null) {
-        out.println("There are no valid KeyProviders configured.\n"
-            + "Nothing will be deleted.\n"
-            + "Consider using the --provider option to indicate the provider"
-            + " to use.");
+        out.println("There are no valid KeyProviders configured. Nothing\n"
+          + "was deleted. Use the -provider option to specify a provider.");
         return false;
       }
       if (keyName == null) {
-        out.println("There is no keyName specified. Please provide the" +
-            "mandatory <keyname>. See the usage description with --help.");
+        out.println("There is no keyName specified. Please specify a " +
+            "<keyname>. See the usage description with -help.");
         return false;
       }
       if (interactive) {
         try {
           cont = ToolRunner
               .confirmPrompt("You are about to DELETE all versions of "
-                  + "the key: " + keyName + " from KeyProvider "
-                  + provider.toString() + ". Continue?:");
+                  + " key: " + keyName + " from KeyProvider "
+                  + provider + ". Continue?:");
           if (!cont) {
             out.println("Nothing has been be deleted.");
           }
@@ -367,7 +414,7 @@ public class KeyShell extends Configured implements Tool {
     public void execute() throws IOException {
       warnIfTransientProvider();
       out.println("Deleting key: " + keyName + " from KeyProvider: "
-          + provider.toString());
+          + provider);
       if (cont) {
         try {
           provider.deleteKey(keyName);
@@ -375,7 +422,7 @@ public class KeyShell extends Configured implements Tool {
           provider.flush();
           printProviderWritten();
         } catch (IOException e) {
-          out.println(keyName + "has NOT been deleted.");
+          out.println(keyName + " has not been deleted.");
           throw e;
         }
       }
@@ -388,36 +435,41 @@ public class KeyShell extends Configured implements Tool {
   }
 
   private class CreateCommand extends Command {
-    public static final String USAGE = "create <keyname> [--cipher] " +
-    		"[--size] [--provider] [--help]";
+    public static final String USAGE =
+      "create <keyname> [-cipher <cipher>] [-size <size>]\n" +
+      "                     [-description <description>]\n" +
+      "                     [-attr <attribute=value>]\n" +
+      "                     [-provider <provider>] [-help]";
     public static final String DESC =
-        "The create subcommand creates a new key for the name specified\n" +
-        "as the <keyname> argument within the provider indicated through\n" +
-        "the --provider argument. You may also indicate the specific\n" +
-        "cipher through the --cipher argument. The default for cipher is\n" +
-        "currently \"AES/CTR/NoPadding\". The default keysize is \"256\".\n" +
-        "You may also indicate the requested key length through the --size\n" +
-        "argument.";
-
-    String keyName = null;
-
-    public CreateCommand(String keyName) {
+      "The create subcommand creates a new key for the name specified\n" +
+      "by the <keyname> argument within the provider specified by the\n" +
+      "-provider argument. You may specify a cipher with the -cipher\n" +
+      "argument. The default cipher is currently \"AES/CTR/NoPadding\".\n" +
+      "The default keysize is 128. You may specify the requested key\n" +
+      "length using the -size argument. Arbitrary attribute=value\n" +
+      "style attributes may be specified using the -attr argument.\n" +
+      "-attr may be specified multiple times, once per attribute.\n";
+
+    final String keyName;
+    final Options options;
+
+    public CreateCommand(String keyName, Options options) {
       this.keyName = keyName;
+      this.options = options;
     }
 
     public boolean validate() {
       boolean rc = true;
       provider = getKeyProvider();
       if (provider == null) {
-        out.println("There are no valid KeyProviders configured.\nKey" +
-        		" will not be created.\n"
-            + "Consider using the --provider option to indicate the provider" +
-            " to use.");
+        out.println("There are no valid KeyProviders configured. No key\n" +
+          " was created. You can use the -provider option to specify\n" +
+          " a provider to use.");
         rc = false;
       }
       if (keyName == null) {
-        out.println("There is no keyName specified. Please provide the" +
-        		"mandatory <keyname>. See the usage description with --help.");
+        out.println("Please provide a <keyname>. See the usage description" +
+          " with -help.");
         rc = false;
       }
       return rc;
@@ -426,19 +478,19 @@ public class KeyShell extends Configured implements Tool {
     public void execute() throws IOException, NoSuchAlgorithmException {
       warnIfTransientProvider();
       try {
-        Options options = KeyProvider.options(getConf());
         provider.createKey(keyName, options);
-        out.println(keyName + " has been successfully created.");
+        out.println(keyName + " has been successfully created with options "
+            + options.toString() + ".");
         provider.flush();
         printProviderWritten();
       } catch (InvalidParameterException e) {
-        out.println(keyName + " has NOT been created. " + e.getMessage());
+        out.println(keyName + " has not been created. " + e.getMessage());
         throw e;
       } catch (IOException e) {
-        out.println(keyName + " has NOT been created. " + e.getMessage());
+        out.println(keyName + " has not been created. " + e.getMessage());
         throw e;
       } catch (NoSuchAlgorithmException e) {
-        out.println(keyName + " has NOT been created. " + e.getMessage());
+        out.println(keyName + " has not been created. " + e.getMessage());
         throw e;
       }
     }
@@ -450,10 +502,11 @@ public class KeyShell extends Configured implements Tool {
   }
 
   /**
-   * Main program.
+   * main() entry point for the KeyShell.  While strictly speaking the
+   * return is void, it will System.exit() with a return code: 0 is for
+   * success and 1 for failure.
    *
-   * @param args
-   *          Command line arguments
+   * @param args Command line arguments.
    * @throws Exception
    */
   public static void main(String[] args) throws Exception {
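
For context, the reworked single-dash options can also be exercised programmatically through ToolRunner. A hedged sketch; the key name, attribute and provider URI are placeholders:

// Equivalent to:
//   hadoop key create mykey -size 128 -attr owner=web \
//     -provider jceks://file/tmp/example.jceks
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyShell;
import org.apache.hadoop.util.ToolRunner;

public class KeyShellExample {
  public static void main(String[] args) throws Exception {
    int rc = ToolRunner.run(new Configuration(), new KeyShell(), new String[] {
        "create", "mykey", "-size", "128",
        "-attr", "owner=web",
        "-provider", "jceks://file/tmp/example.jceks"});
    System.exit(rc);  // 0 on success, 1 on failure, per run() above
  }
}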

+ 6 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/UserProvider.java

@@ -55,12 +55,13 @@ public class UserProvider extends KeyProvider {
   }
 
   @Override
-  public synchronized KeyVersion getKeyVersion(String versionName) {
+  public synchronized KeyVersion getKeyVersion(String versionName)
+      throws IOException {
     byte[] bytes = credentials.getSecretKey(new Text(versionName));
     if (bytes == null) {
       return null;
     }
-    return new KeyVersion(versionName, bytes);
+    return new KeyVersion(getBaseName(versionName), versionName, bytes);
   }
 
   @Override
@@ -89,12 +90,12 @@ public class UserProvider extends KeyProvider {
           options.getBitLength() + ", but got " + (8 * material.length));
     }
     Metadata meta = new Metadata(options.getCipher(), options.getBitLength(),
-        options.getDescription(), new Date(), 1);
+        options.getDescription(), options.getAttributes(), new Date(), 1);
     cache.put(name, meta);
     String versionName = buildVersionName(name, 0);
     credentials.addSecretKey(nameT, meta.serialize());
     credentials.addSecretKey(new Text(versionName), material);
-    return new KeyVersion(versionName, material);
+    return new KeyVersion(name, versionName, material);
   }
 
   @Override
@@ -125,7 +126,7 @@ public class UserProvider extends KeyProvider {
     credentials.addSecretKey(new Text(name), meta.serialize());
     String versionName = buildVersionName(name, nextVersion);
     credentials.addSecretKey(new Text(versionName), material);
-    return new KeyVersion(versionName, material);
+    return new KeyVersion(name, versionName, material);
   }
 
   @Override

+ 788 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java

@@ -0,0 +1,788 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
+import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.ProviderUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
+import org.apache.http.client.utils.URIBuilder;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import javax.net.ssl.HttpsURLConnection;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.lang.reflect.Constructor;
+import java.net.HttpURLConnection;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.net.URLEncoder;
+import java.security.GeneralSecurityException;
+import java.security.NoSuchAlgorithmException;
+import java.security.PrivilegedExceptionAction;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
+import java.util.concurrent.ExecutionException;
+
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * KMS client <code>KeyProvider</code> implementation.
+ */
+@InterfaceAudience.Private
+public class KMSClientProvider extends KeyProvider implements CryptoExtension,
+    KeyProviderDelegationTokenExtension.DelegationTokenExtension {
+
+  public static final String TOKEN_KIND = "kms-dt";
+
+  public static final String SCHEME_NAME = "kms";
+
+  private static final String UTF8 = "UTF-8";
+
+  private static final String CONTENT_TYPE = "Content-Type";
+  private static final String APPLICATION_JSON_MIME = "application/json";
+
+  private static final String HTTP_GET = "GET";
+  private static final String HTTP_POST = "POST";
+  private static final String HTTP_PUT = "PUT";
+  private static final String HTTP_DELETE = "DELETE";
+
+
+  private static final String CONFIG_PREFIX = "hadoop.security.kms.client.";
+
+  /* It's possible to specify a timeout, in seconds, in the config file */
+  public static final String TIMEOUT_ATTR = CONFIG_PREFIX + "timeout";
+  public static final int DEFAULT_TIMEOUT = 60;
+
+  private final ValueQueue<EncryptedKeyVersion> encKeyVersionQueue;
+
+  private class EncryptedQueueRefiller implements
+    ValueQueue.QueueRefiller<EncryptedKeyVersion> {
+
+    @Override
+    public void fillQueueForKey(String keyName,
+        Queue<EncryptedKeyVersion> keyQueue, int numEKVs) throws IOException {
+      checkNotNull(keyName, "keyName");
+      Map<String, String> params = new HashMap<String, String>();
+      params.put(KMSRESTConstants.EEK_OP, KMSRESTConstants.EEK_GENERATE);
+      params.put(KMSRESTConstants.EEK_NUM_KEYS, "" + numEKVs);
+      URL url = createURL(KMSRESTConstants.KEY_RESOURCE, keyName,
+          KMSRESTConstants.EEK_SUB_RESOURCE, params);
+      HttpURLConnection conn = createConnection(url, HTTP_GET);
+      conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
+      List response = call(conn, null,
+          HttpURLConnection.HTTP_OK, List.class);
+      List<EncryptedKeyVersion> ekvs =
+          parseJSONEncKeyVersion(keyName, response);
+      keyQueue.addAll(ekvs);
+    }
+  }
+
+  public static class KMSEncryptedKeyVersion extends EncryptedKeyVersion {
+    public KMSEncryptedKeyVersion(String keyName, String keyVersionName,
+        byte[] iv, String encryptedVersionName, byte[] keyMaterial) {
+      super(keyName, keyVersionName, iv, new KMSKeyVersion(null, 
+          encryptedVersionName, keyMaterial));
+    }
+  }
+
+  @SuppressWarnings("rawtypes")
+  private static List<EncryptedKeyVersion>
+      parseJSONEncKeyVersion(String keyName, List valueList) {
+    List<EncryptedKeyVersion> ekvs = new LinkedList<EncryptedKeyVersion>();
+    if (!valueList.isEmpty()) {
+      for (Object values : valueList) {
+        Map valueMap = (Map) values;
+
+        String versionName = checkNotNull(
+                (String) valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
+                KMSRESTConstants.VERSION_NAME_FIELD);
+
+        byte[] iv = Base64.decodeBase64(checkNotNull(
+                (String) valueMap.get(KMSRESTConstants.IV_FIELD),
+                KMSRESTConstants.IV_FIELD));
+
+        Map encValueMap = checkNotNull((Map)
+                valueMap.get(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD),
+                KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD);
+
+        String encVersionName = checkNotNull((String)
+                encValueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
+                KMSRESTConstants.VERSION_NAME_FIELD);
+
+        byte[] encKeyMaterial = Base64.decodeBase64(checkNotNull((String)
+                encValueMap.get(KMSRESTConstants.MATERIAL_FIELD),
+                KMSRESTConstants.MATERIAL_FIELD));
+
+        ekvs.add(new KMSEncryptedKeyVersion(keyName, versionName, iv,
+            encVersionName, encKeyMaterial));
+      }
+    }
+    return ekvs;
+  }
+
+  private static KeyVersion parseJSONKeyVersion(Map valueMap) {
+    KeyVersion keyVersion = null;
+    if (!valueMap.isEmpty()) {
+      byte[] material = (valueMap.containsKey(KMSRESTConstants.MATERIAL_FIELD))
+          ? Base64.decodeBase64((String) valueMap.get(KMSRESTConstants.MATERIAL_FIELD))
+          : null;
+      String versionName = (String)valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD);
+      String keyName = (String)valueMap.get(KMSRESTConstants.NAME_FIELD);
+      keyVersion = new KMSKeyVersion(keyName, versionName, material);
+    }
+    return keyVersion;
+  }
+
+  @SuppressWarnings("unchecked")
+  private static Metadata parseJSONMetadata(Map valueMap) {
+    Metadata metadata = null;
+    if (!valueMap.isEmpty()) {
+      metadata = new KMSMetadata(
+          (String) valueMap.get(KMSRESTConstants.CIPHER_FIELD),
+          (Integer) valueMap.get(KMSRESTConstants.LENGTH_FIELD),
+          (String) valueMap.get(KMSRESTConstants.DESCRIPTION_FIELD),
+          (Map<String, String>) valueMap.get(KMSRESTConstants.ATTRIBUTES_FIELD),
+          new Date((Long) valueMap.get(KMSRESTConstants.CREATED_FIELD)),
+          (Integer) valueMap.get(KMSRESTConstants.VERSIONS_FIELD));
+    }
+    return metadata;
+  }
+
+  private static void writeJson(Map map, OutputStream os) throws IOException {
+    Writer writer = new OutputStreamWriter(os);
+    ObjectMapper jsonMapper = new ObjectMapper();
+    jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, map);
+  }
+
+  /**
+   * The factory to create KMSClientProvider, which is used by the
+   * ServiceLoader.
+   */
+  public static class Factory extends KeyProviderFactory {
+
+    @Override
+    public KeyProvider createProvider(URI providerName, Configuration conf)
+        throws IOException {
+      if (SCHEME_NAME.equals(providerName.getScheme())) {
+        return new KMSClientProvider(providerName, conf);
+      }
+      return null;
+    }
+  }
+
+  public static <T> T checkNotNull(T o, String name)
+      throws IllegalArgumentException {
+    if (o == null) {
+      throw new IllegalArgumentException("Parameter '" + name +
+          "' cannot be null");
+    }
+    return o;
+  }
+
+  public static String checkNotEmpty(String s, String name)
+      throws IllegalArgumentException {
+    checkNotNull(s, name);
+    if (s.isEmpty()) {
+      throw new IllegalArgumentException("Parameter '" + name +
+          "' cannot be empty");
+    }
+    return s;
+  }
+
+  private String kmsUrl;
+  private SSLFactory sslFactory;
+  private ConnectionConfigurator configurator;
+  private DelegationTokenAuthenticatedURL.Token authToken;
+  private UserGroupInformation loginUgi;
+
+  @Override
+  public String toString() {
+    final StringBuilder sb = new StringBuilder("KMSClientProvider[");
+    sb.append(kmsUrl).append("]");
+    return sb.toString();
+  }
+
+  /**
+   * This small class exists to set the timeout values for a connection
+   */
+  private static class TimeoutConnConfigurator
+          implements ConnectionConfigurator {
+    private ConnectionConfigurator cc;
+    private int timeout;
+
+    /**
+     * Sets the timeout and wraps another connection configurator
+     * @param timeout - will set both connect and read timeouts - in seconds
+     * @param cc - another configurator to wrap - may be null
+     */
+    public TimeoutConnConfigurator(int timeout, ConnectionConfigurator cc) {
+      this.timeout = timeout;
+      this.cc = cc;
+    }
+
+    /**
+     * Calls the wrapped configure() method, then sets timeouts
+     * @param conn the {@link HttpURLConnection} instance to configure.
+     * @return the connection
+     * @throws IOException
+     */
+    @Override
+    public HttpURLConnection configure(HttpURLConnection conn)
+            throws IOException {
+      if (cc != null) {
+        conn = cc.configure(conn);
+      }
+      conn.setConnectTimeout(timeout * 1000);  // conversion to milliseconds
+      conn.setReadTimeout(timeout * 1000);
+      return conn;
+    }
+  }
+
+  public KMSClientProvider(URI uri, Configuration conf) throws IOException {
+    Path path = ProviderUtils.unnestUri(uri);
+    URL url = path.toUri().toURL();
+    kmsUrl = createServiceURL(url);
+    if ("https".equalsIgnoreCase(url.getProtocol())) {
+      sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+      try {
+        sslFactory.init();
+      } catch (GeneralSecurityException ex) {
+        throw new IOException(ex);
+      }
+    }
+    int timeout = conf.getInt(TIMEOUT_ATTR, DEFAULT_TIMEOUT);
+    configurator = new TimeoutConnConfigurator(timeout, sslFactory);
+    encKeyVersionQueue =
+        new ValueQueue<KeyProviderCryptoExtension.EncryptedKeyVersion>(
+            conf.getInt(
+                CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE,
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT),
+            conf.getFloat(
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK,
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT),
+            conf.getInt(
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS,
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT),
+            conf.getInt(
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS,
+                CommonConfigurationKeysPublic.
+                    KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
+            new EncryptedQueueRefiller());
+    authToken = new DelegationTokenAuthenticatedURL.Token();
+    loginUgi = UserGroupInformation.getCurrentUser();
+  }
+
+  private String createServiceURL(URL url) throws IOException {
+    String str = url.toExternalForm();
+    if (str.endsWith("/")) {
+      str = str.substring(0, str.length() - 1);
+    }
+    return new URL(str + KMSRESTConstants.SERVICE_VERSION + "/").
+        toExternalForm();
+  }
+
+  private URL createURL(String collection, String resource, String subResource,
+      Map<String, ?> parameters) throws IOException {
+    try {
+      StringBuilder sb = new StringBuilder();
+      sb.append(kmsUrl);
+      if (collection != null) {
+        sb.append(collection);
+        if (resource != null) {
+          sb.append("/").append(URLEncoder.encode(resource, UTF8));
+          if (subResource != null) {
+            sb.append("/").append(subResource);
+          }
+        }
+      }
+      URIBuilder uriBuilder = new URIBuilder(sb.toString());
+      if (parameters != null) {
+        for (Map.Entry<String, ?> param : parameters.entrySet()) {
+          Object value = param.getValue();
+          if (value instanceof String) {
+            uriBuilder.addParameter(param.getKey(), (String) value);
+          } else {
+            for (String s : (String[]) value) {
+              uriBuilder.addParameter(param.getKey(), s);
+            }
+          }
+        }
+      }
+      return uriBuilder.build().toURL();
+    } catch (URISyntaxException ex) {
+      throw new IOException(ex);
+    }
+  }
+
+  private HttpURLConnection configureConnection(HttpURLConnection conn)
+      throws IOException {
+    if (sslFactory != null) {
+      HttpsURLConnection httpsConn = (HttpsURLConnection) conn;
+      try {
+        httpsConn.setSSLSocketFactory(sslFactory.createSSLSocketFactory());
+      } catch (GeneralSecurityException ex) {
+        throw new IOException(ex);
+      }
+      httpsConn.setHostnameVerifier(sslFactory.getHostnameVerifier());
+    }
+    return conn;
+  }
+
+  private HttpURLConnection createConnection(final URL url, String method)
+      throws IOException {
+    HttpURLConnection conn;
+    try {
+      // if current UGI is different from UGI at constructor time, behave as
+      // proxyuser
+      UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
+      final String doAsUser =
+          (loginUgi.getShortUserName().equals(currentUgi.getShortUserName()))
+          ? null : currentUgi.getShortUserName();
+
+      // creating the HTTP connection using the current UGI at constructor time
+      conn = loginUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
+        @Override
+        public HttpURLConnection run() throws Exception {
+          DelegationTokenAuthenticatedURL authUrl =
+              new DelegationTokenAuthenticatedURL(configurator);
+          return authUrl.openConnection(url, authToken, doAsUser);
+        }
+      });
+    } catch (IOException ex) {
+      throw ex;
+    } catch (Exception ex) {
+      throw new IOException(ex);
+    }
+    conn.setUseCaches(false);
+    conn.setRequestMethod(method);
+    if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) {
+      conn.setDoOutput(true);
+    }
+    conn = configureConnection(conn);
+    return conn;
+  }
+
+  // Trick: ride on generics to rethrow a checked exception without declaring it.
+
+  private static void throwEx(Throwable ex) {
+    KMSClientProvider.<RuntimeException>throwException(ex);
+  }
+
+  @SuppressWarnings("unchecked")
+  private static <E extends Throwable> void throwException(Throwable ex)
+      throws E {
+    throw (E) ex;
+  }
+
+  @SuppressWarnings("unchecked")
+  private static void validateResponse(HttpURLConnection conn, int expected)
+      throws IOException {
+    int status = conn.getResponseCode();
+    if (status != expected) {
+      InputStream es = null;
+      try {
+        Exception toThrow;
+        String contentType = conn.getHeaderField(CONTENT_TYPE);
+        if (contentType != null &&
+            contentType.toLowerCase().startsWith(APPLICATION_JSON_MIME)) {
+          es = conn.getErrorStream();
+          ObjectMapper mapper = new ObjectMapper();
+          Map json = mapper.readValue(es, Map.class);
+          String exClass = (String) json.get(
+              KMSRESTConstants.ERROR_EXCEPTION_JSON);
+          String exMsg = (String)
+              json.get(KMSRESTConstants.ERROR_MESSAGE_JSON);
+          try {
+            ClassLoader cl = KMSClientProvider.class.getClassLoader();
+            Class klass = cl.loadClass(exClass);
+            Constructor constr = klass.getConstructor(String.class);
+            toThrow = (Exception) constr.newInstance(exMsg);
+          } catch (Exception ex) {
+            toThrow = new IOException(MessageFormat.format(
+                "HTTP status [{0}], {1}", status, conn.getResponseMessage()));
+          }
+        } else {
+          toThrow = new IOException(MessageFormat.format(
+              "HTTP status [{0}], {1}", status, conn.getResponseMessage()));
+        }
+        throwEx(toThrow);
+      } finally {
+        if (es != null) {
+          es.close();
+        }
+      }
+    }
+  }
+
+  private static <T> T call(HttpURLConnection conn, Map jsonOutput,
+      int expectedResponse, Class<T> klass)
+      throws IOException {
+    T ret = null;
+    try {
+      if (jsonOutput != null) {
+        writeJson(jsonOutput, conn.getOutputStream());
+      }
+    } catch (IOException ex) {
+      conn.getInputStream().close();
+      throw ex;
+    }
+    validateResponse(conn, expectedResponse);
+    if (APPLICATION_JSON_MIME.equalsIgnoreCase(conn.getContentType())
+        && klass != null) {
+      ObjectMapper mapper = new ObjectMapper();
+      InputStream is = null;
+      try {
+        is = conn.getInputStream();
+        ret = mapper.readValue(is, klass);
+      } catch (IOException ex) {
+        if (is != null) {
+          is.close();
+        }
+        throw ex;
+      } finally {
+        if (is != null) {
+          is.close();
+        }
+      }
+    }
+    return ret;
+  }
+
+  public static class KMSKeyVersion extends KeyVersion {
+    public KMSKeyVersion(String keyName, String versionName, byte[] material) {
+      super(keyName, versionName, material);
+    }
+  }
+
+  @Override
+  public KeyVersion getKeyVersion(String versionName) throws IOException {
+    checkNotEmpty(versionName, "versionName");
+    URL url = createURL(KMSRESTConstants.KEY_VERSION_RESOURCE,
+        versionName, null, null);
+    HttpURLConnection conn = createConnection(url, HTTP_GET);
+    Map response = call(conn, null, HttpURLConnection.HTTP_OK, Map.class);
+    return parseJSONKeyVersion(response);
+  }
+
+  @Override
+  public KeyVersion getCurrentKey(String name) throws IOException {
+    checkNotEmpty(name, "name");
+    URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name,
+        KMSRESTConstants.CURRENT_VERSION_SUB_RESOURCE, null);
+    HttpURLConnection conn = createConnection(url, HTTP_GET);
+    Map response = call(conn, null, HttpURLConnection.HTTP_OK, Map.class);
+    return parseJSONKeyVersion(response);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public List<String> getKeys() throws IOException {
+    URL url = createURL(KMSRESTConstants.KEYS_NAMES_RESOURCE, null, null,
+        null);
+    HttpURLConnection conn = createConnection(url, HTTP_GET);
+    List response = call(conn, null, HttpURLConnection.HTTP_OK, List.class);
+    return (List<String>) response;
+  }
+
+  public static class KMSMetadata extends Metadata {
+    public KMSMetadata(String cipher, int bitLength, String description,
+        Map<String, String> attributes, Date created, int versions) {
+      super(cipher, bitLength, description, attributes, created, versions);
+    }
+  }
+
+  // breaking keyNames into sets to keep the resulting URL under 2000 chars
+  private List<String[]> createKeySets(String[] keyNames) {
+    List<String[]> list = new ArrayList<String[]>();
+    List<String> batch = new ArrayList<String>();
+    int batchLen = 0;
+    for (String name : keyNames) {
+      int additionalLen = KMSRESTConstants.KEY.length() + 1 + name.length();
+      batchLen += additionalLen;
+      // capping at 1500 to account for the initial URL and encoded names
+      if (batchLen > 1500) {
+        list.add(batch.toArray(new String[batch.size()]));
+        batch = new ArrayList<String>();
+        batchLen = additionalLen;
+      }
+      batch.add(name);
+    }
+    if (!batch.isEmpty()) {
+      list.add(batch.toArray(new String[batch.size()]));
+    }
+    return list;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public Metadata[] getKeysMetadata(String ... keyNames) throws IOException {
+    List<Metadata> keysMetadata = new ArrayList<Metadata>();
+    List<String[]> keySets = createKeySets(keyNames);
+    for (String[] keySet : keySets) {
+      if (keyNames.length > 0) {
+        Map<String, Object> queryStr = new HashMap<String, Object>();
+        queryStr.put(KMSRESTConstants.KEY, keySet);
+        URL url = createURL(KMSRESTConstants.KEYS_METADATA_RESOURCE, null,
+            null, queryStr);
+        HttpURLConnection conn = createConnection(url, HTTP_GET);
+        List<Map> list = call(conn, null, HttpURLConnection.HTTP_OK, List.class);
+        for (Map map : list) {
+          keysMetadata.add(parseJSONMetadata(map));
+        }
+      }
+    }
+    return keysMetadata.toArray(new Metadata[keysMetadata.size()]);
+  }
+
+  private KeyVersion createKeyInternal(String name, byte[] material,
+      Options options)
+      throws NoSuchAlgorithmException, IOException {
+    checkNotEmpty(name, "name");
+    checkNotNull(options, "options");
+    Map<String, Object> jsonKey = new HashMap<String, Object>();
+    jsonKey.put(KMSRESTConstants.NAME_FIELD, name);
+    jsonKey.put(KMSRESTConstants.CIPHER_FIELD, options.getCipher());
+    jsonKey.put(KMSRESTConstants.LENGTH_FIELD, options.getBitLength());
+    if (material != null) {
+      jsonKey.put(KMSRESTConstants.MATERIAL_FIELD,
+          Base64.encodeBase64String(material));
+    }
+    if (options.getDescription() != null) {
+      jsonKey.put(KMSRESTConstants.DESCRIPTION_FIELD,
+          options.getDescription());
+    }
+    if (options.getAttributes() != null && !options.getAttributes().isEmpty()) {
+      jsonKey.put(KMSRESTConstants.ATTRIBUTES_FIELD, options.getAttributes());
+    }
+    URL url = createURL(KMSRESTConstants.KEYS_RESOURCE, null, null, null);
+    HttpURLConnection conn = createConnection(url, HTTP_POST);
+    conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
+    Map response = call(conn, jsonKey, HttpURLConnection.HTTP_CREATED,
+        Map.class);
+    return parseJSONKeyVersion(response);
+  }
+
+  @Override
+  public KeyVersion createKey(String name, Options options)
+      throws NoSuchAlgorithmException, IOException {
+    return createKeyInternal(name, null, options);
+  }
+
+  @Override
+  public KeyVersion createKey(String name, byte[] material, Options options)
+      throws IOException {
+    checkNotNull(material, "material");
+    try {
+      return createKeyInternal(name, material, options);
+    } catch (NoSuchAlgorithmException ex) {
+      throw new RuntimeException("It should not happen", ex);
+    }
+  }
+
+  private KeyVersion rollNewVersionInternal(String name, byte[] material)
+      throws NoSuchAlgorithmException, IOException {
+    checkNotEmpty(name, "name");
+    Map<String, String> jsonMaterial = new HashMap<String, String>();
+    if (material != null) {
+      jsonMaterial.put(KMSRESTConstants.MATERIAL_FIELD,
+          Base64.encodeBase64String(material));
+    }
+    URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name, null, null);
+    HttpURLConnection conn = createConnection(url, HTTP_POST);
+    conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
+    Map response = call(conn, jsonMaterial,
+        HttpURLConnection.HTTP_OK, Map.class);
+    return parseJSONKeyVersion(response);
+  }
+
+
+  @Override
+  public KeyVersion rollNewVersion(String name)
+      throws NoSuchAlgorithmException, IOException {
+    return rollNewVersionInternal(name, null);
+  }
+
+  @Override
+  public KeyVersion rollNewVersion(String name, byte[] material)
+      throws IOException {
+    checkNotNull(material, "material");
+    try {
+      return rollNewVersionInternal(name, material);
+    } catch (NoSuchAlgorithmException ex) {
+      throw new RuntimeException("It should not happen", ex);
+    }
+  }
+
+  @Override
+  public EncryptedKeyVersion generateEncryptedKey(
+      String encryptionKeyName) throws IOException, GeneralSecurityException {
+    try {
+      return encKeyVersionQueue.getNext(encryptionKeyName);
+    } catch (ExecutionException e) {
+      if (e.getCause() instanceof SocketTimeoutException) {
+        throw (SocketTimeoutException)e.getCause();
+      }
+      throw new IOException(e);
+    }
+  }
+
+  @SuppressWarnings("rawtypes")
+  @Override
+  public KeyVersion decryptEncryptedKey(
+      EncryptedKeyVersion encryptedKeyVersion) throws IOException,
+                                                      GeneralSecurityException {
+    checkNotNull(encryptedKeyVersion.getEncryptionKeyVersionName(),
+        "versionName");
+    checkNotNull(encryptedKeyVersion.getEncryptedKeyIv(), "iv");
+    Preconditions.checkArgument(
+        encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
+            .equals(KeyProviderCryptoExtension.EEK),
+        "encryptedKey version name must be '%s', is '%s'",
+        KeyProviderCryptoExtension.EEK,
+        encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
+    );
+    checkNotNull(encryptedKeyVersion.getEncryptedKeyVersion(), "encryptedKey");
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(KMSRESTConstants.EEK_OP, KMSRESTConstants.EEK_DECRYPT);
+    Map<String, Object> jsonPayload = new HashMap<String, Object>();
+    jsonPayload.put(KMSRESTConstants.NAME_FIELD,
+        encryptedKeyVersion.getEncryptionKeyName());
+    jsonPayload.put(KMSRESTConstants.IV_FIELD, Base64.encodeBase64String(
+        encryptedKeyVersion.getEncryptedKeyIv()));
+    jsonPayload.put(KMSRESTConstants.MATERIAL_FIELD, Base64.encodeBase64String(
+            encryptedKeyVersion.getEncryptedKeyVersion().getMaterial()));
+    URL url = createURL(KMSRESTConstants.KEY_VERSION_RESOURCE,
+        encryptedKeyVersion.getEncryptionKeyVersionName(),
+        KMSRESTConstants.EEK_SUB_RESOURCE, params);
+    HttpURLConnection conn = createConnection(url, HTTP_POST);
+    conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
+    Map response =
+        call(conn, jsonPayload, HttpURLConnection.HTTP_OK, Map.class);
+    return parseJSONKeyVersion(response);
+  }
+
+  @Override
+  public List<KeyVersion> getKeyVersions(String name) throws IOException {
+    checkNotEmpty(name, "name");
+    URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name,
+        KMSRESTConstants.VERSIONS_SUB_RESOURCE, null);
+    HttpURLConnection conn = createConnection(url, HTTP_GET);
+    List response = call(conn, null, HttpURLConnection.HTTP_OK, List.class);
+    List<KeyVersion> versions = null;
+    if (!response.isEmpty()) {
+      versions = new ArrayList<KeyVersion>();
+      for (Object obj : response) {
+        versions.add(parseJSONKeyVersion((Map) obj));
+      }
+    }
+    return versions;
+  }
+
+  @Override
+  public Metadata getMetadata(String name) throws IOException {
+    checkNotEmpty(name, "name");
+    URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name,
+        KMSRESTConstants.METADATA_SUB_RESOURCE, null);
+    HttpURLConnection conn = createConnection(url, HTTP_GET);
+    Map response = call(conn, null, HttpURLConnection.HTTP_OK, Map.class);
+    return parseJSONMetadata(response);
+  }
+
+  @Override
+  public void deleteKey(String name) throws IOException {
+    checkNotEmpty(name, "name");
+    URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name, null, null);
+    HttpURLConnection conn = createConnection(url, HTTP_DELETE);
+    call(conn, null, HttpURLConnection.HTTP_OK, null);
+  }
+
+  @Override
+  public void flush() throws IOException {
+    // NOP
+    // The client does not keep any local state, so there is nothing to flush
+    // on the client side.
+    // The server should not keep in-memory state on behalf of clients either.
+  }
+
+  @Override
+  public void warmUpEncryptedKeys(String... keyNames)
+      throws IOException {
+    try {
+      encKeyVersionQueue.initializeQueuesForKeys(keyNames);
+    } catch (ExecutionException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public Token<?>[] addDelegationTokens(String renewer,
+      Credentials credentials) throws IOException {
+    Token<?>[] tokens;
+    URL url = createURL(null, null, null, null);
+    DelegationTokenAuthenticatedURL authUrl =
+        new DelegationTokenAuthenticatedURL(configurator);
+    try {
+      Token<?> token = authUrl.getDelegationToken(url, authToken, renewer);
+      if (token != null) {
+        credentials.addToken(token.getService(), token);
+        tokens = new Token<?>[] { token };
+      } else {
+        throw new IOException("Got NULL as delegation token");
+      }
+    } catch (AuthenticationException ex) {
+      throw new IOException(ex);
+    }
+    return tokens;
+  }
+
+}
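
For context, a hedged sketch of driving the new provider through its kms:// scheme and the crypto-extension operations it implements (warmUpEncryptedKeys, generateEncryptedKey, decryptEncryptedKey). The KMS address and key name are placeholders, and the KeyProviderCryptoExtension factory method is assumed from the existing API:

// Hypothetical usage sketch; "mykey" must already exist on the KMS.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class KmsEekExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
        "kms://http@localhost:16000/kms");
    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
    // KMSClientProvider implements CryptoExtension, so the wrapper uses the
    // provider itself rather than a default extension implementation.
    KeyProviderCryptoExtension kpce = KeyProviderCryptoExtension
        .createKeyProviderCryptoExtension(provider);
    kpce.warmUpEncryptedKeys("mykey");       // pre-fill the client-side EEK cache
    KeyProviderCryptoExtension.EncryptedKeyVersion eek =
        kpce.generateEncryptedKey("mykey");  // served from the ValueQueue cache
    KeyProvider.KeyVersion kv = kpce.decryptEncryptedKey(eek);
    System.out.println("Decrypted key material length: "
        + kv.getMaterial().length);
  }
}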

+ 62 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java

@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * KMS REST and JSON constants shared by the KMS server and client.
+ */
+@InterfaceAudience.Private
+public class KMSRESTConstants {
+
+  public static final String SERVICE_VERSION = "/v1";
+  public static final String KEY_RESOURCE = "key";
+  public static final String KEYS_RESOURCE = "keys";
+  public static final String KEYS_METADATA_RESOURCE = KEYS_RESOURCE +
+      "/metadata";
+  public static final String KEYS_NAMES_RESOURCE = KEYS_RESOURCE + "/names";
+  public static final String KEY_VERSION_RESOURCE = "keyversion";
+  public static final String METADATA_SUB_RESOURCE = "_metadata";
+  public static final String VERSIONS_SUB_RESOURCE = "_versions";
+  public static final String EEK_SUB_RESOURCE = "_eek";
+  public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion";
+
+  public static final String KEY = "key";
+  public static final String EEK_OP = "eek_op";
+  public static final String EEK_GENERATE = "generate";
+  public static final String EEK_DECRYPT = "decrypt";
+  public static final String EEK_NUM_KEYS = "num_keys";
+
+  public static final String IV_FIELD = "iv";
+  public static final String NAME_FIELD = "name";
+  public static final String CIPHER_FIELD = "cipher";
+  public static final String LENGTH_FIELD = "length";
+  public static final String DESCRIPTION_FIELD = "description";
+  public static final String ATTRIBUTES_FIELD = "attributes";
+  public static final String CREATED_FIELD = "created";
+  public static final String VERSIONS_FIELD = "versions";
+  public static final String MATERIAL_FIELD = "material";
+  public static final String VERSION_NAME_FIELD = "versionName";
+  public static final String ENCRYPTED_KEY_VERSION_FIELD =
+      "encryptedKeyVersion";
+
+  public static final String ERROR_EXCEPTION_JSON = "exception";
+  public static final String ERROR_MESSAGE_JSON = "message";
+
+}
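
Editor's note: the constants above are the shared vocabulary of the KMS REST wire protocol. A small sketch (editor's addition; the key name is hypothetical) of roughly how the client composes request paths from them:

    import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;

    public class KmsPathSketch {
      public static void main(String[] args) {
        String keyName = "myKey";  // hypothetical key name

        // GET <kms-url>/v1/key/myKey/_metadata  -> key metadata as JSON
        String metadataPath = KMSRESTConstants.SERVICE_VERSION + "/"
            + KMSRESTConstants.KEY_RESOURCE + "/" + keyName + "/"
            + KMSRESTConstants.METADATA_SUB_RESOURCE;

        // GET <kms-url>/v1/key/myKey/_versions  -> all versions of the key
        String versionsPath = KMSRESTConstants.SERVICE_VERSION + "/"
            + KMSRESTConstants.KEY_RESOURCE + "/" + keyName + "/"
            + KMSRESTConstants.VERSIONS_SUB_RESOURCE;

        System.out.println(metadataPath);  // /v1/key/myKey/_metadata
        System.out.println(versionsPath);  // /v1/key/myKey/_versions
      }
    }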

+ 317 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java

@@ -0,0 +1,317 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.base.Preconditions;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A utility class that maintains a queue of entries for a given key. It tries
+ * to ensure that there are always at least <code>numValues</code> entries
+ * available for the client to consume for a particular key.
+ * It also uses an underlying cache to evict queues for keys that have not been
+ * accessed for a configurable period of time.
+ * Users of this class must supply an implementation of the
+ * <code>QueueRefiller</code> interface, which exposes a method to refill the
+ * queue when it is empty.
+ */
+@InterfaceAudience.Private
+public class ValueQueue <E> {
+
+  /**
+   * The QueueRefiller interface that a client must implement to use this class.
+   */
+  public interface QueueRefiller <E> {
+    /**
+     * Fills the queue for the given key with the requested number of values.
+     * @param keyName Key name
+     * @param keyQueue Queue that needs to be filled
+     * @param numValues number of Values to be added to the queue.
+     * @throws IOException
+     */
+    public void fillQueueForKey(String keyName,
+        Queue<E> keyQueue, int numValues) throws IOException;
+  }
+
+  private static final String REFILL_THREAD =
+      ValueQueue.class.getName() + "_thread";
+
+  private final LoadingCache<String, LinkedBlockingQueue<E>> keyQueues;
+  private final ThreadPoolExecutor executor;
+  private final UniqueKeyBlockingQueue queue = new UniqueKeyBlockingQueue();
+  private final QueueRefiller<E> refiller;
+  private final SyncGenerationPolicy policy;
+
+  private final int numValues;
+  private final float lowWatermark;
+
+  /**
+   * A <code>Runnable</code> which takes a string name.
+   */
+  private abstract static class NamedRunnable implements Runnable {
+    final String name;
+    private NamedRunnable(String keyName) {
+      this.name = keyName;
+    }
+  }
+
+  /**
+   * This is the backing blocking queue used in conjunction with the
+   * <code>ThreadPoolExecutor</code> used by the <code>ValueQueue</code>. This
+   * queue accepts a task only if a task for the same key is not already
+   * pending, which is tracked by the presence of the key in the
+   * <code>keysInProgress</code> set.
+   *
+   * NOTE: Only methods that are explicitly called by the
+   * <code>ThreadPoolExecutor</code> need to be overridden.
+   */
+  private static class UniqueKeyBlockingQueue extends
+      LinkedBlockingQueue<Runnable> {
+
+    private static final long serialVersionUID = -2152747693695890371L;
+    private HashSet<String> keysInProgress = new HashSet<String>();
+
+    @Override
+    public synchronized void put(Runnable e) throws InterruptedException {
+      if (keysInProgress.add(((NamedRunnable)e).name)) {
+        super.put(e);
+      }
+    }
+
+    @Override
+    public Runnable take() throws InterruptedException {
+      Runnable k = super.take();
+      if (k != null) {
+        keysInProgress.remove(((NamedRunnable)k).name);
+      }
+      return k;
+    }
+
+    @Override
+    public Runnable poll(long timeout, TimeUnit unit)
+        throws InterruptedException {
+      Runnable k = super.poll(timeout, unit);
+      if (k != null) {
+        keysInProgress.remove(((NamedRunnable)k).name);
+      }
+      return k;
+    }
+
+  }
+
+  /**
+   * Policy that decides how many values to return to the client when the
+   * client asks for "n" values and the queue is empty.
+   * This governs how many values are generated synchronously in "getAtMost".
+   */
+  public static enum SyncGenerationPolicy {
+    ATLEAST_ONE, // Return at least 1 value
+    LOW_WATERMARK, // Return min(n, lowWatermark * numValues) values
+    ALL // Return n values
+  }
+
+  /**
+   * Constructor takes the following tunable configuration parameters
+   * @param numValues The number of values cached in the Queue for a
+   *    particular key.
+   * @param lowWatermark The ratio of (number of current entries/numValues)
+   *    below which the <code>fillQueueForKey()</code> function will be
+   *    invoked to fill the Queue.
+   * @param expiry Expiry time after which the Key and associated Queue are
+   *    evicted from the cache.
+   * @param numFillerThreads Number of threads to use for the filler thread
+   * @param policy The SyncGenerationPolicy to use when client
+   *    calls "getAtMost"
+   * @param refiller implementation of the QueueRefiller
+   */
+  public ValueQueue(final int numValues, final float lowWatermark,
+      long expiry, int numFillerThreads, SyncGenerationPolicy policy,
+      final QueueRefiller<E> refiller) {
+    Preconditions.checkArgument(numValues > 0, "\"numValues\" must be > 0");
+    Preconditions.checkArgument(((lowWatermark > 0)&&(lowWatermark <= 1)),
+        "\"lowWatermark\" must be > 0 and <= 1");
+    Preconditions.checkArgument(expiry > 0, "\"expiry\" must be > 0");
+    Preconditions.checkArgument(numFillerThreads > 0,
+        "\"numFillerThreads\" must be > 0");
+    Preconditions.checkNotNull(policy, "\"policy\" must not be null");
+    this.refiller = refiller;
+    this.policy = policy;
+    this.numValues = numValues;
+    this.lowWatermark = lowWatermark;
+    keyQueues = CacheBuilder.newBuilder()
+            .expireAfterAccess(expiry, TimeUnit.MILLISECONDS)
+            .build(new CacheLoader<String, LinkedBlockingQueue<E>>() {
+                  @Override
+                  public LinkedBlockingQueue<E> load(String keyName)
+                      throws Exception {
+                    LinkedBlockingQueue<E> keyQueue =
+                        new LinkedBlockingQueue<E>();
+                    refiller.fillQueueForKey(keyName, keyQueue,
+                        (int)(lowWatermark * numValues));
+                    return keyQueue;
+                  }
+                });
+
+    executor =
+        new ThreadPoolExecutor(numFillerThreads, numFillerThreads, 0L,
+            TimeUnit.MILLISECONDS, queue, new ThreadFactoryBuilder()
+                .setDaemon(true)
+                .setNameFormat(REFILL_THREAD).build());
+    // To ensure all requests are first queued, make coreThreads = maxThreads
+    // and pre-start all the Core Threads.
+    executor.prestartAllCoreThreads();
+  }
+
+  public ValueQueue(final int numValues, final float lowWaterMark, long expiry,
+      int numFillerThreads, QueueRefiller<E> fetcher) {
+    this(numValues, lowWaterMark, expiry, numFillerThreads,
+        SyncGenerationPolicy.ALL, fetcher);
+  }
+
+  /**
+   * Initializes the value queues for the provided keys by invoking the
+   * queue filler for each key (pre-filling up to the low watermark).
+   * @param keyNames Array of key Names
+   * @throws ExecutionException
+   */
+  public void initializeQueuesForKeys(String... keyNames)
+      throws ExecutionException {
+    for (String keyName : keyNames) {
+      keyQueues.get(keyName);
+    }
+  }
+
+  /**
+   * This removes the value currently at the head of the queue for the
+   * provided key. The queue filler function is fired immediately if the key
+   * does not exist.
+   * If the queue exists but all values have been drained, the generator
+   * function is asked to add one value to the queue, which is then drained.
+   * @param keyName String key name
+   * @return E the next value in the Queue
+   * @throws IOException
+   * @throws ExecutionException
+   */
+  public E getNext(String keyName)
+      throws IOException, ExecutionException {
+    return getAtMost(keyName, 1).get(0);
+  }
+
+  /**
+   * This removes up to "num" values currently at the head of the queue for
+   * the provided key. The queue filler function is fired immediately if the
+   * key does not exist.
+   * How many values are actually returned is governed by the
+   * <code>SyncGenerationPolicy</code> specified by the user.
+   * @param keyName String key name
+   * @param num Number of values requested.
+   * @return List<E> values returned
+   * @throws IOException
+   * @throws ExecutionException
+   */
+  public List<E> getAtMost(String keyName, int num) throws IOException,
+      ExecutionException {
+    LinkedBlockingQueue<E> keyQueue = keyQueues.get(keyName);
+    // Using poll to avoid a race condition.
+    LinkedList<E> ekvs = new LinkedList<E>();
+    try {
+      for (int i = 0; i < num; i++) {
+        E val = keyQueue.poll();
+        // If queue is empty now, Based on the provided SyncGenerationPolicy,
+        // figure out how many new values need to be generated synchronously
+        if (val == null) {
+          // Synchronous call to get remaining values
+          int numToFill = 0;
+          switch (policy) {
+          case ATLEAST_ONE:
+            numToFill = (ekvs.size() < 1) ? 1 : 0;
+            break;
+          case LOW_WATERMARK:
+            numToFill =
+                Math.min(num, (int) (lowWatermark * numValues)) - ekvs.size();
+            break;
+          case ALL:
+            numToFill = num - ekvs.size();
+            break;
+          }
+          // Synchronous fill if not enough values found
+          if (numToFill > 0) {
+            refiller.fillQueueForKey(keyName, ekvs, numToFill);
+          }
+          // Schedule an async task to refill the queue past the low watermark
+          if (i <= (int) (lowWatermark * numValues)) {
+            submitRefillTask(keyName, keyQueue);
+          }
+          return ekvs;
+        }
+        ekvs.add(val);
+      }
+    } catch (Exception e) {
+      throw new IOException("Exeption while contacting value generator ", e);
+    }
+    return ekvs;
+  }
+
+  private void submitRefillTask(final String keyName,
+      final Queue<E> keyQueue) throws InterruptedException {
+    // The submit/execute method of the ThreadPoolExecutor is bypassed and
+    // the Runnable is directly put in the backing BlockingQueue so that we
+    // can control exactly how the runnable is inserted into the queue.
+    queue.put(
+        new NamedRunnable(keyName) {
+          @Override
+          public void run() {
+            int cacheSize = numValues;
+            int threshold = (int) (lowWatermark * (float) cacheSize);
+            // Need to ensure that only one refill task per key is executed
+            try {
+              if (keyQueue.size() < threshold) {
+                refiller.fillQueueForKey(name, keyQueue,
+                    cacheSize - keyQueue.size());
+              }
+            } catch (final Exception e) {
+              throw new RuntimeException(e);
+            }
+          }
+        }
+        );
+  }
+
+  /**
+   * Shuts down the refill executor.
+   */
+  public void shutdown() {
+    executor.shutdownNow();
+  }
+
+}
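
Editor's note: a self-contained sketch (not part of the patch) of wiring a QueueRefiller into ValueQueue and consuming values, using the constructor parameters documented above. The refiller below just fabricates strings; in the KMS client the refiller asks the server for encrypted key versions.

    import java.io.IOException;
    import java.util.List;
    import java.util.Queue;

    import org.apache.hadoop.crypto.key.kms.ValueQueue;
    import org.apache.hadoop.crypto.key.kms.ValueQueue.QueueRefiller;
    import org.apache.hadoop.crypto.key.kms.ValueQueue.SyncGenerationPolicy;

    public class ValueQueueSketch {
      /** Toy refiller that "generates" string values for a key. */
      static class CounterRefiller implements QueueRefiller<String> {
        @Override
        public void fillQueueForKey(String keyName, Queue<String> keyQueue,
            int numValues) throws IOException {
          for (int i = 0; i < numValues; i++) {
            keyQueue.add(keyName + "-" + System.nanoTime());
          }
        }
      }

      public static void main(String[] args) throws Exception {
        // Cache 10 values per key, refill asynchronously below 30% occupancy,
        // evict idle key queues after 60s, use 2 filler threads.
        ValueQueue<String> vq = new ValueQueue<String>(10, 0.3f, 60 * 1000, 2,
            SyncGenerationPolicy.ALL, new CounterRefiller());

        vq.initializeQueuesForKeys("k1", "k2"); // pre-fill to the low watermark
        String one = vq.getNext("k1");          // a single value
        List<String> batch = vq.getAtMost("k2", 5); // up to 5 values
        System.out.println(one + " / got " + batch.size());

        vq.shutdown();
      }
    }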

+ 134 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.fs;
 
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.reflect.Constructor;
@@ -44,6 +43,7 @@ import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.security.AccessControlException;
@@ -804,6 +804,18 @@ public abstract class AbstractFileSystem {
       throws AccessControlException, FileNotFoundException,
       UnresolvedLinkException, IOException;
 
+  /**
+   * The specification of this method matches that of
+   * {@link FileContext#access(Path, FsAction)}
+   * except that an UnresolvedLinkException may be thrown if a symlink is
+   * encountered in the path.
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    FileSystem.checkAccessPermissions(this.getFileStatus(path), mode);
+  }
+
   /**
    * The specification of this method matches that of
    * {@link FileContext#getFileLinkStatus(Path)}
@@ -1039,6 +1051,127 @@ public abstract class AbstractFileSystem {
         + " doesn't support getAclStatus");
   }
 
+  /**
+   * Set an xattr of a file or directory.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to modify
+   * @param name xattr name.
+   * @param value xattr value.
+   * @throws IOException
+   */
+  public void setXAttr(Path path, String name, byte[] value)
+      throws IOException {
+    setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
+        XAttrSetFlag.REPLACE));
+  }
+
+  /**
+   * Set an xattr of a file or directory.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to modify
+   * @param name xattr name.
+   * @param value xattr value.
+   * @param flag xattr set flag
+   * @throws IOException
+   */
+  public void setXAttr(Path path, String name, byte[] value,
+      EnumSet<XAttrSetFlag> flag) throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support setXAttr");
+  }
+
+  /**
+   * Get an xattr for a file or directory.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to get extended attribute
+   * @param name xattr name.
+   * @return byte[] xattr value.
+   * @throws IOException
+   */
+  public byte[] getXAttr(Path path, String name) throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support getXAttr");
+  }
+
+  /**
+   * Get all of the xattrs for a file or directory.
+   * Only those xattrs for which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to get extended attributes
+   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @throws IOException
+   */
+  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support getXAttrs");
+  }
+
+  /**
+   * Get all of the xattrs for a file or directory.
+   * Only those xattrs for which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to get extended attributes
+   * @param names XAttr names.
+   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @throws IOException
+   */
+  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+      throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support getXAttrs");
+  }
+
+  /**
+   * Get all of the xattr names for a file or directory.
+   * Only the xattr names for which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to get extended attributes
+   * @return List<String> of the XAttr names of the file or directory
+   * @throws IOException
+   */
+  public List<String> listXAttrs(Path path)
+          throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+            + " doesn't support listXAttrs");
+  }
+
+  /**
+   * Remove an xattr of a file or directory.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to remove extended attribute
+   * @param name xattr name
+   * @throws IOException
+   */
+  public void removeXAttr(Path path, String name) throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support removeXAttr");
+  }
+
   @Override //Object
   public int hashCode() {
     return myUri.hashCode();
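
Editor's note: the new access() default above delegates to FileSystem.checkAccessPermissions(), which consults exactly one of the owner, group, or other bit sets of the file's FsPermission, depending on who the caller is. A minimal sketch of that bit check (an illustration, not the Hadoop implementation; the 0640 mode is arbitrary):

    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class AccessCheckSketch {
      public static void main(String[] args) {
        FsPermission perm = new FsPermission((short) 0640); // rw-r-----
        FsAction requested = FsAction.READ;

        // The caller matches exactly one of owner / group / other, and only
        // that bit set is consulted by the default check.
        System.out.println(perm.getUserAction().implies(requested));  // true
        System.out.println(perm.getGroupAction().implies(requested)); // true
        System.out.println(perm.getOtherAction().implies(requested)); // false
      }
    }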

+ 9 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs;
 
 import java.io.BufferedInputStream;
+import java.io.EOFException;
 import java.io.FileDescriptor;
 import java.io.IOException;
 
@@ -51,6 +52,9 @@ implements Seekable, PositionedReadable, HasFileDescriptor {
 
   @Override
   public long getPos() throws IOException {
+    if (in == null) {
+      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+    }
     return ((FSInputStream)in).getPos()-(count-pos);
   }
 
@@ -66,8 +70,11 @@ implements Seekable, PositionedReadable, HasFileDescriptor {
 
   @Override
   public void seek(long pos) throws IOException {
-    if( pos<0 ) {
-      return;
+    if (in == null) {
+      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+    }
+    if (pos < 0) {
+      throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
     }
     if (this.pos != this.count) {
       // optimize: check if the pos is in the buffer
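
Editor's note: the behavioural change here is that a closed stream or a negative seek now fails fast instead of being silently ignored. A small sketch (editor's addition; the temporary path is hypothetical) of what a caller observes on the local file system, where a negative seek surfaces as an EOFException carrying FSExceptionMessages.NEGATIVE_SEEK:

    import java.io.EOFException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class NegativeSeekSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path p = new Path("/tmp/negative-seek-demo.txt"); // hypothetical path
        fs.createNewFile(p);

        FSDataInputStream in = fs.open(p);
        try {
          in.seek(-1); // previously returned silently; now fails fast
        } catch (EOFException e) {
          System.out.println("negative seek rejected: " + e.getMessage());
        } finally {
          in.close();
          fs.delete(p, true);
        }
      }
    }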

+ 13 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -18,7 +18,10 @@
 
 package org.apache.hadoop.fs;
 
-import java.io.*;
+import java.io.EOFException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
 import java.nio.channels.ClosedChannelException;
 import java.util.Arrays;
 
@@ -26,8 +29,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.PureJavaCrc32;
 
 /****************************************************************
  * Abstract Checksumed FileSystem.
@@ -147,7 +150,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
         if (!Arrays.equals(version, CHECKSUM_VERSION))
           throw new IOException("Not a checksum file: "+sumFile);
         this.bytesPerSum = sums.readInt();
-        set(fs.verifyChecksum, new PureJavaCrc32(), bytesPerSum, 4);
+        set(fs.verifyChecksum, DataChecksum.newCrc32(), bytesPerSum, 4);
       } catch (FileNotFoundException e) {         // quietly ignore
         set(fs.verifyChecksum, null, 1, 0);
       } catch (IOException e) {                   // loudly ignore
@@ -259,8 +262,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
     private Path file;
     private long fileLen = -1L;
 
-    FSDataBoundedInputStream(FileSystem fs, Path file, InputStream in)
-        throws IOException {
+    FSDataBoundedInputStream(FileSystem fs, Path file, InputStream in) {
       super(in);
       this.fs = fs;
       this.file = file;
@@ -317,8 +319,8 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
 
     @Override
     public synchronized void seek(long pos) throws IOException {
-      if(pos>getFileLength()) {
-        throw new IOException("Cannot seek after EOF");
+      if (pos > getFileLength()) {
+        throw new EOFException("Cannot seek after EOF");
       }
       super.seek(pos);
     }
@@ -379,7 +381,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
                           long blockSize,
                           Progressable progress)
       throws IOException {
-      super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
+      super(DataChecksum.newCrc32(), fs.getBytesPerSum(), 4);
       int bytesPerSum = fs.getBytesPerSum();
       this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
                                          replication, blockSize, progress);
@@ -435,7 +437,9 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
         throw new FileNotFoundException("Parent directory doesn't exist: "
             + parent);
       } else if (!mkdirs(parent)) {
-        throw new IOException("Mkdirs failed to create " + parent);
+        throw new IOException("Mkdirs failed to create " + parent
+            + " (exists=" + exists(parent) + ", cwd=" + getWorkingDirectory()
+            + ")");
       }
     }
     final FSDataOutputStream out;

+ 6 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java

@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.fs;
 
-import java.io.*;
+import java.io.EOFException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
 import java.net.URISyntaxException;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
@@ -31,8 +33,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.PureJavaCrc32;
 
 /**
  * Abstract Checksumed Fs.
@@ -139,7 +141,7 @@ public abstract class ChecksumFs extends FilterFs {
           throw new IOException("Not a checksum file: "+sumFile);
         }
         this.bytesPerSum = sums.readInt();
-        set(fs.verifyChecksum, new PureJavaCrc32(), bytesPerSum, 4);
+        set(fs.verifyChecksum, DataChecksum.newCrc32(), bytesPerSum, 4);
       } catch (FileNotFoundException e) {         // quietly ignore
         set(fs.verifyChecksum, null, 1, 0);
       } catch (IOException e) {                   // loudly ignore
@@ -335,7 +337,7 @@ public abstract class ChecksumFs extends FilterFs {
       final short replication, final long blockSize, 
       final Progressable progress, final ChecksumOpt checksumOpt,
       final boolean createParent) throws IOException {
-      super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
+      super(DataChecksum.newCrc32(), fs.getBytesPerSum(), 4);
 
       // checksumOpt is passed down to the raw fs. Unless the raw fs
       // implements checksums internally, checksumOpt will be ignored.

+ 27 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -131,6 +131,12 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
    * Service Authorization
    */
   public static final String 
+  HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL = 
+      "security.service.authorization.default.acl";
+  public static final String 
+  HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_BLOCKED_ACL =
+      "security.service.authorization.default.acl.blocked";
+  public static final String
   HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_POLICY = 
       "security.refresh.policy.protocol.acl";
   public static final String 
@@ -139,6 +145,12 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String 
   HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_USER_MAPPINGS =
       "security.refresh.user.mappings.protocol.acl";
+  public static final String
+  HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_CALLQUEUE =
+      "security.refresh.callqueue.protocol.acl";
+  public static final String
+  HADOOP_SECURITY_SERVICE_AUTHORIZATION_GENERIC_REFRESH =
+      "security.refresh.generic.protocol.acl";
   public static final String 
   SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
   public static final String 
@@ -199,6 +211,11 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
       "ha.failover-controller.graceful-fence.connection.retries";
   public static final int HA_FC_GRACEFUL_FENCE_CONNECTION_RETRIES_DEFAULT = 1;
 
+  /** number of zookeeper operation retry times in ActiveStandbyElector */
+  public static final String HA_FC_ELECTOR_ZK_OP_RETRIES_KEY =
+      "ha.failover-controller.active-standby-elector.zk.op.retries";
+  public static final int HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT = 3;
+
   /* Timeout that the CLI (manual) FC waits for monitorHealth, getServiceState */
   public static final String HA_FC_CLI_CHECK_TIMEOUT_KEY =
     "ha.failover-controller.cli-check.rpc-timeout.ms";
@@ -242,6 +259,10 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String  IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY = "ipc.client.fallback-to-simple-auth-allowed";
   public static final boolean IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT = false;
 
+  public static final String IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY =
+    "ipc.client.connect.max.retries.on.sasl";
+  public static final int    IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT = 5;
+
   /** How often the server scans for idle connections */
   public static final String IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY =
       "ipc.client.connection.idle-scan-interval.ms";
@@ -257,4 +278,10 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final boolean RPC_METRICS_QUANTILE_ENABLE_DEFAULT = false;
   public static final String  RPC_METRICS_PERCENTILES_INTERVALS_KEY =
       "rpc.metrics.percentiles.intervals";
+  
+  /** Allowed hosts for nfs exports */
+  public static final String NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
+  public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY = "nfs.exports.allowed.hosts";
+  public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
+
 }
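
Editor's note: a short sketch (editor's addition) of how the new keys are read through Configuration; the sample value shown for the NFS exports list is purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class CommonKeysSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Retries for ZooKeeper operations in ActiveStandbyElector.
        int zkRetries = conf.getInt(
            CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_KEY,
            CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT);

        // Hosts allowed to mount NFS exports, e.g. "192.168.0.0/22 rw ; host1 ro".
        String nfsHosts = conf.get(
            CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY,
            CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT);

        System.out.println("zk retries=" + zkRetries + ", nfs exports=" + nfsHosts);
      }
    }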

+ 40 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -78,6 +78,8 @@ public class CommonConfigurationKeysPublic {
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY =
     "net.topology.table.file.name";
+  public static final String NET_DEPENDENCY_SCRIPT_FILE_NAME_KEY = 
+    "net.topology.dependency.script.file.name";
 
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  FS_TRASH_CHECKPOINT_INTERVAL_KEY =
@@ -205,7 +207,7 @@ public class CommonConfigurationKeysPublic {
   public static final String  IPC_CLIENT_TCPNODELAY_KEY =
     "ipc.client.tcpnodelay";
   /** Default value for IPC_CLIENT_TCPNODELAY_KEY */
-  public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = false;
+  public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = true;
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  IPC_SERVER_LISTEN_QUEUE_SIZE_KEY =
     "ipc.server.listen.queue.size";
@@ -224,7 +226,7 @@ public class CommonConfigurationKeysPublic {
   public static final String  IPC_SERVER_TCPNODELAY_KEY =
     "ipc.server.tcpnodelay";
   /** Default value for IPC_SERVER_TCPNODELAY_KEY */
-  public static final boolean IPC_SERVER_TCPNODELAY_DEFAULT = false;
+  public static final boolean IPC_SERVER_TCPNODELAY_DEFAULT = true;
 
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY =
@@ -248,6 +250,12 @@ public class CommonConfigurationKeysPublic {
   public static final long HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT =
     300;
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String  HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS =
+    "hadoop.security.groups.negative-cache.secs";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final long HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT =
+    30;
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS =
     "hadoop.security.groups.cache.warn.after.ms";
   public static final long HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT =
@@ -280,5 +288,35 @@ public class CommonConfigurationKeysPublic {
   /** Class to override Sasl Properties for a connection */
   public static final String  HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS =
     "hadoop.security.saslproperties.resolver.class";
+  /** Class to override Impersonation provider */
+  public static final String  HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS =
+    "hadoop.security.impersonation.provider.class";
+
+  //  <!-- KMSClientProvider configurations -->
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String KMS_CLIENT_ENC_KEY_CACHE_SIZE =
+      "hadoop.security.kms.client.encrypted.key.cache.size";
+  /** Default value for KMS_CLIENT_ENC_KEY_CACHE_SIZE */
+  public static final int KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT = 500;
+
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK =
+      "hadoop.security.kms.client.encrypted.key.cache.low-watermark";
+  /** Default value for KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK */
+  public static final float KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT =
+      0.3f;
+
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS =
+      "hadoop.security.kms.client.encrypted.key.cache.num.refill.threads";
+  /** Default value for KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS */
+  public static final int KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT =
+      2;
+
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS =
+      "hadoop.security.kms.client.encrypted.key.cache.expiry";
+  /** Default value for KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS (12 hrs) */
+  public static final int KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT = 43200000;
 }
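
Editor's note: these properties are the knobs behind the encrypted-key cache that KMSClientProvider builds on ValueQueue earlier in this patch (the cache size maps to numValues, the low watermark to lowWatermark, and so on). A small sketch (editor's addition) of reading them with their defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

    public class KmsCacheConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        int cacheSize = conf.getInt(
            CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE,
            CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT);
        float lowWatermark = conf.getFloat(
            CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK,
            CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT);
        int expiryMs = conf.getInt(
            CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS,
            CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT);

        // With the defaults: up to 500 cached encrypted keys per key name,
        // asynchronous refill below 30% occupancy, idle queues expire after 12h.
        System.out.println(cacheSize + " / " + lowWatermark + " / " + expiryMs + "ms");
      }
    }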
 

+ 37 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java

@@ -24,6 +24,7 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.util.StringUtils;
 
 /** Store the summary of a content (a directory or a file). */
 @InterfaceAudience.Public
@@ -102,7 +103,7 @@ public class ContentSummary implements Writable{
    * <----12----> <----12----> <-------18------->
    *    DIR_COUNT   FILE_COUNT       CONTENT_SIZE FILE_NAME    
    */
-  private static final String STRING_FORMAT = "%12d %12d %18d ";
+  private static final String STRING_FORMAT = "%12s %12s %18s ";
   /** 
    * Output format:
    * <----12----> <----15----> <----15----> <----15----> <----12----> <----12----> <-------18------->
@@ -117,7 +118,7 @@ public class ContentSummary implements Writable{
 
   private static final String QUOTA_HEADER = String.format(
       QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT, 
-      "quota", "remaining quota", "space quota", "reamaining quota") +
+      "name quota", "rem name quota", "space quota", "rem space quota") +
       HEADER;
   
   /** Return the header of the output.
@@ -139,11 +140,25 @@ public class ContentSummary implements Writable{
   /** Return the string representation of the object in the output format.
    * if qOption is false, output directory count, file count, and content size;
    * if qOption is true, output quota and remaining quota as well.
+   *
+   * @param qOption a flag indicating if quota needs to be printed or not
+   * @return the string representation of the object
+  */
+  public String toString(boolean qOption) {
+    return toString(qOption, false);
+  }
+
+  /** Return the string representation of the object in the output format.
+   * if qOption is false, output directory count, file count, and content size;
+   * if qOption is true, output quota and remaining quota as well.
+   * if hOption is false, file sizes are returned in bytes;
+   * if hOption is true, file sizes are returned in a human readable format.
    * 
    * @param qOption a flag indicating if quota needs to be printed or not
+   * @param hOption a flag indicating if human readable output is to be used
    * @return the string representation of the object
    */
-  public String toString(boolean qOption) {
+  public String toString(boolean qOption, boolean hOption) {
     String prefix = "";
     if (qOption) {
       String quotaStr = "none";
@@ -152,19 +167,32 @@ public class ContentSummary implements Writable{
       String spaceQuotaRem = "inf";
       
       if (quota>0) {
-        quotaStr = Long.toString(quota);
-        quotaRem = Long.toString(quota-(directoryCount+fileCount));
+        quotaStr = formatSize(quota, hOption);
+        quotaRem = formatSize(quota-(directoryCount+fileCount), hOption);
       }
       if (spaceQuota>0) {
-        spaceQuotaStr = Long.toString(spaceQuota);
-        spaceQuotaRem = Long.toString(spaceQuota - spaceConsumed);        
+        spaceQuotaStr = formatSize(spaceQuota, hOption);
+        spaceQuotaRem = formatSize(spaceQuota - spaceConsumed, hOption);
       }
       
       prefix = String.format(QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT, 
                              quotaStr, quotaRem, spaceQuotaStr, spaceQuotaRem);
     }
     
-    return prefix + String.format(STRING_FORMAT, directoryCount, 
-                                  fileCount, length);
+    return prefix + String.format(STRING_FORMAT,
+     formatSize(directoryCount, hOption),
+     formatSize(fileCount, hOption),
+     formatSize(length, hOption));
+  }
+  /**
+   * Formats a size to be human readable or in bytes
+   * @param size value to be formatted
+   * @param humanReadable flag indicating human readable or not
+   * @return String representation of the size
+  */
+  private String formatSize(long size, boolean humanReadable) {
+    return humanReadable
+      ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
+      : String.valueOf(size);
   }
 }
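
Editor's note: a brief sketch (editor's addition; the path is hypothetical) contrasting the existing byte-count output with the new human readable rendering introduced by toString(boolean, boolean):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HumanReadableSummarySketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        ContentSummary summary = fs.getContentSummary(new Path("/tmp")); // hypothetical

        // Raw byte counts (previous behaviour).
        System.out.println(ContentSummary.getHeader(false));
        System.out.println(summary.toString(false));

        // New: the same counts rendered with binary size prefixes.
        System.out.println(summary.toString(false, true));
      }
    }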

+ 4 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java

@@ -67,7 +67,10 @@ public class FSDataOutputStream extends DataOutputStream
 
     @Override
     public void close() throws IOException {
-      out.close();
+      // ensure close works even if a null reference was passed in
+      if (out != null) {
+        out.close();
+      }
     }
   }
 

+ 43 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java

@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+/**
+ * Standard strings to use in exception messages in filesystems
+ * HDFS is used as the reference source of the strings
+ */
+public class FSExceptionMessages {
+
+  /**
+   * The operation failed because the stream is closed: {@value}
+   */
+  public static final String STREAM_IS_CLOSED = "Stream is closed!";
+
+  /**
+   * Negative offset seek forbidden : {@value}
+   */
+  public static final String NEGATIVE_SEEK =
+    "Cannot seek to a negative offset";
+
+  /**
+   * Seek or read past the end of the file: {@value}
+   */
+  public static final String CANNOT_SEEK_PAST_EOF =
+      "Attempted to seek or read past the end of the file";
+}

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.zip.Checksum;
@@ -394,8 +395,8 @@ abstract public class FSInputChecker extends FSInputStream {
 
   @Override
   public synchronized void seek(long pos) throws IOException {
-    if( pos<0 ) {
-      return;
+    if( pos < 0 ) {
+      throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
     }
     // optimize: check if the pos is in the buffer
     long start = chunkPos - this.count;

+ 208 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
@@ -1108,6 +1109,55 @@ public final class FileContext {
     }.resolve(this, absF);
   }
 
+  /**
+   * Checks if the user can access a path.  The mode specifies which access
+   * checks to perform.  If the requested permissions are granted, then the
+   * method returns normally.  If access is denied, then the method throws an
+   * {@link AccessControlException}.
+   * <p/>
+   * The default implementation of this method calls {@link #getFileStatus(Path)}
+   * and checks the returned permissions against the requested permissions.
+   * Note that the getFileStatus call will be subject to authorization checks.
+   * Typically, this requires search (execute) permissions on each directory in
+   * the path's prefix, but this is implementation-defined.  Any file system
+   * that provides a richer authorization model (such as ACLs) may override the
+   * default implementation so that it checks against that model instead.
+   * <p>
+   * In general, applications should avoid using this method, due to the risk of
+   * time-of-check/time-of-use race conditions.  The permissions on a file may
+   * change immediately after the access call returns.  Most applications should
+   * prefer running specific file system actions as the desired user represented
+   * by a {@link UserGroupInformation}.
+   *
+   * @param path Path to check
+   * @param mode type of access to check
+   * @throws AccessControlException if access is denied
+   * @throws FileNotFoundException if the path does not exist
+   * @throws UnsupportedFileSystemException if file system for <code>path</code>
+   *   is not supported
+   * @throws IOException see specific implementation
+   * 
+   * Exceptions applicable to file systems accessed over RPC:
+   * @throws RpcClientException If an exception occurred in the RPC client
+   * @throws RpcServerException If an exception occurred in the RPC server
+   * @throws UnexpectedServerException If server implementation throws 
+   *           undeclared exception to RPC server
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+  public void access(final Path path, final FsAction mode)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    final Path absPath = fixRelativePart(path);
+    new FSLinkResolver<Void>() {
+      @Override
+      public Void next(AbstractFileSystem fs, Path p) throws IOException,
+          UnresolvedLinkException {
+        fs.access(p, mode);
+        return null;
+      }
+    }.resolve(this, absPath);
+  }
+
   /**
    * Return a file status object that represents the path. If the path 
    * refers to a symlink then the FileStatus of the symlink is returned.
@@ -2294,4 +2344,162 @@ public final class FileContext {
       }
     }.resolve(this, absF);
   }
+
+  /**
+   * Set an xattr of a file or directory.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to modify
+   * @param name xattr name.
+   * @param value xattr value.
+   * @throws IOException
+   */
+  public void setXAttr(Path path, String name, byte[] value)
+      throws IOException {
+    setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
+        XAttrSetFlag.REPLACE));
+  }
+
+  /**
+   * Set an xattr of a file or directory.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to modify
+   * @param name xattr name.
+   * @param value xattr value.
+   * @param flag xattr set flag
+   * @throws IOException
+   */
+  public void setXAttr(Path path, final String name, final byte[] value,
+      final EnumSet<XAttrSetFlag> flag) throws IOException {
+    final Path absF = fixRelativePart(path);
+    new FSLinkResolver<Void>() {
+      @Override
+      public Void next(final AbstractFileSystem fs, final Path p)
+          throws IOException {
+        fs.setXAttr(p, name, value, flag);
+        return null;
+      }
+    }.resolve(this, absF);
+  }
+
+  /**
+   * Get an xattr for a file or directory.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to get extended attribute
+   * @param name xattr name.
+   * @return byte[] xattr value.
+   * @throws IOException
+   */
+  public byte[] getXAttr(Path path, final String name) throws IOException {
+    final Path absF = fixRelativePart(path);
+    return new FSLinkResolver<byte[]>() {
+      @Override
+      public byte[] next(final AbstractFileSystem fs, final Path p)
+          throws IOException {
+        return fs.getXAttr(p, name);
+      }
+    }.resolve(this, absF);
+  }
+
+  /**
+   * Get all of the xattrs for a file or directory.
+   * Only those xattrs for which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to get extended attributes
+   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @throws IOException
+   */
+  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+    final Path absF = fixRelativePart(path);
+    return new FSLinkResolver<Map<String, byte[]>>() {
+      @Override
+      public Map<String, byte[]> next(final AbstractFileSystem fs, final Path p)
+          throws IOException {
+        return fs.getXAttrs(p);
+      }
+    }.resolve(this, absF);
+  }
+
+  /**
+   * Get all of the xattrs for a file or directory.
+   * Only those xattrs for which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to get extended attributes
+   * @param names XAttr names.
+   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @throws IOException
+   */
+  public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
+      throws IOException {
+    final Path absF = fixRelativePart(path);
+    return new FSLinkResolver<Map<String, byte[]>>() {
+      @Override
+      public Map<String, byte[]> next(final AbstractFileSystem fs, final Path p)
+          throws IOException {
+        return fs.getXAttrs(p, names);
+      }
+    }.resolve(this, absF);
+  }
+
+  /**
+   * Remove an xattr of a file or directory.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to remove extended attribute
+   * @param name xattr name
+   * @throws IOException
+   */
+  public void removeXAttr(Path path, final String name) throws IOException {
+    final Path absF = fixRelativePart(path);
+    new FSLinkResolver<Void>() {
+      @Override
+      public Void next(final AbstractFileSystem fs, final Path p)
+          throws IOException {
+        fs.removeXAttr(p, name);
+        return null;
+      }
+    }.resolve(this, absF);
+  }
+
+  /**
+   * Get all of the xattr names for a file or directory.
+   * Only those xattr names which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to get extended attributes
+   * @return List<String> of the XAttr names of the file or directory
+   * @throws IOException
+   */
+  public List<String> listXAttrs(Path path) throws IOException {
+    final Path absF = fixRelativePart(path);
+    return new FSLinkResolver<List<String>>() {
+      @Override
+      public List<String> next(final AbstractFileSystem fs, final Path p)
+          throws IOException {
+        return fs.listXAttrs(p);
+      }
+    }.resolve(this, absF);
+  }
 }
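
Editor's note: a usage sketch (editor's addition) of the FileContext additions above. The path and xattr name are hypothetical, and the xattr calls assume an underlying file system with extended attribute support (such as HDFS); access() throws AccessControlException when the requested FsAction is not granted.

    import java.util.EnumSet;

    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.XAttrSetFlag;
    import org.apache.hadoop.fs.permission.FsAction;

    public class FileContextXAttrSketch {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext(); // default file system
        Path p = new Path("/user/alice/data.csv");     // hypothetical path

        // Fails with AccessControlException if the caller cannot read and write p.
        fc.access(p, FsAction.READ_WRITE);

        // Attach and read back a user-namespace extended attribute.
        fc.setXAttr(p, "user.origin", "ingest-job-42".getBytes("UTF-8"),
            EnumSet.of(XAttrSetFlag.CREATE));
        byte[] value = fc.getXAttr(p, "user.origin");
        System.out.println("user.origin = " + new String(value, "UTF-8"));

        System.out.println("all names: " + fc.listXAttrs(p));
        fc.removeXAttr(p, "user.origin");
      }
    }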

+ 15 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java

@@ -99,6 +99,21 @@ public class FileStatus implements Writable, Comparable {
     assert (isdir && symlink == null) || !isdir;
   }
 
+  /**
+   * Copy constructor.
+   *
+   * @param other FileStatus to copy
+   */
+  public FileStatus(FileStatus other) throws IOException {
+    // It's important to call the getters here instead of directly accessing the
+    // members.  Subclasses like ViewFsFileStatus can override the getters.
+    this(other.getLen(), other.isDirectory(), other.getReplication(),
+      other.getBlockSize(), other.getModificationTime(), other.getAccessTime(),
+      other.getPermission(), other.getOwner(), other.getGroup(),
+      (other.isSymlink() ? other.getSymlink() : null),
+      other.getPath());
+  }
+
   /**
    * Get the length of this file, in bytes.
    * @return the length of this file, in bytes.

+ 227 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -25,6 +25,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -50,6 +51,7 @@ import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.Text;
@@ -2072,6 +2074,71 @@ public abstract class FileSystem extends Configured implements Closeable {
    */
   public abstract FileStatus getFileStatus(Path f) throws IOException;
 
+  /**
+   * Checks if the user can access a path.  The mode specifies which access
+   * checks to perform.  If the requested permissions are granted, then the
+   * method returns normally.  If access is denied, then the method throws an
+   * {@link AccessControlException}.
+   * <p/>
+   * The default implementation of this method calls {@link #getFileStatus(Path)}
+   * and checks the returned permissions against the requested permissions.
+   * Note that the getFileStatus call will be subject to authorization checks.
+   * Typically, this requires search (execute) permissions on each directory in
+   * the path's prefix, but this is implementation-defined.  Any file system
+   * that provides a richer authorization model (such as ACLs) may override the
+   * default implementation so that it checks against that model instead.
+   * <p>
+   * In general, applications should avoid using this method, due to the risk of
+   * time-of-check/time-of-use race conditions.  The permissions on a file may
+   * change immediately after the access call returns.  Most applications should
+   * prefer running specific file system actions as the desired user represented
+   * by a {@link UserGroupInformation}.
+   *
+   * @param path Path to check
+   * @param mode type of access to check
+   * @throws AccessControlException if access is denied
+   * @throws FileNotFoundException if the path does not exist
+   * @throws IOException see specific implementation
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    checkAccessPermissions(this.getFileStatus(path), mode);
+  }
+
+  /**
+   * This method provides the default implementation of
+   * {@link #access(Path, FsAction)}.
+   *
+   * @param stat FileStatus to check
+   * @param mode type of access to check
+   * @throws IOException for any error
+   */
+  @InterfaceAudience.Private
+  static void checkAccessPermissions(FileStatus stat, FsAction mode)
+      throws IOException {
+    FsPermission perm = stat.getPermission();
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    String user = ugi.getShortUserName();
+    List<String> groups = Arrays.asList(ugi.getGroupNames());
+    if (user.equals(stat.getOwner())) {
+      if (perm.getUserAction().implies(mode)) {
+        return;
+      }
+    } else if (groups.contains(stat.getGroup())) {
+      if (perm.getGroupAction().implies(mode)) {
+        return;
+      }
+    } else {
+      if (perm.getOtherAction().implies(mode)) {
+        return;
+      }
+    }
+    throw new AccessControlException(String.format(
+      "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(),
+      stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
+  }
+
   /**
    * See {@link FileContext#fixRelativePart}
    */
@@ -2140,9 +2207,21 @@ public abstract class FileSystem extends Configured implements Closeable {
    *  in the corresponding FileSystem.
    */
   public FileChecksum getFileChecksum(Path f) throws IOException {
+    return getFileChecksum(f, Long.MAX_VALUE);
+  }
+
+  /**
+   * Get the checksum of a file, from the beginning of the file till the
+   * specific length.
+   * @param f The file path
+   * @param length The length of the file range for checksum calculation
+   * @return The file checksum.
+   */
+  public FileChecksum getFileChecksum(Path f, final long length)
+      throws IOException {
     return null;
   }
-  
+
   /**
    * Set the verify checksum flag. This is only applicable if the 
    * corresponding FileSystem supports checksum. By default doesn't do anything.
@@ -2350,6 +2429,126 @@ public abstract class FileSystem extends Configured implements Closeable {
         + " doesn't support getAclStatus");
   }
 
+  /**
+   * Set an xattr of a file or directory.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to modify
+   * @param name xattr name.
+   * @param value xattr value.
+   * @throws IOException
+   */
+  public void setXAttr(Path path, String name, byte[] value)
+      throws IOException {
+    setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
+        XAttrSetFlag.REPLACE));
+  }
+
+  /**
+   * Set an xattr of a file or directory.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to modify
+   * @param name xattr name.
+   * @param value xattr value.
+   * @param flag xattr set flag
+   * @throws IOException
+   */
+  public void setXAttr(Path path, String name, byte[] value,
+      EnumSet<XAttrSetFlag> flag) throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support setXAttr");
+  }
+
+  /**
+   * Get an xattr name and value for a file or directory.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to get extended attribute
+   * @param name xattr name.
+   * @return byte[] xattr value.
+   * @throws IOException
+   */
+  public byte[] getXAttr(Path path, String name) throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support getXAttr");
+  }
+
+  /**
+   * Get all of the xattr name/value pairs for a file or directory.
+   * Only those xattrs which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to get extended attributes
+   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @throws IOException
+   */
+  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support getXAttrs");
+  }
+
+  /**
+   * Get the name/value pairs for the requested xattrs of a file or directory.
+   * Only those xattrs which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to get extended attributes
+   * @param names XAttr names.
+   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @throws IOException
+   */
+  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+      throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support getXAttrs");
+  }
+
+  /**
+   * Get all of the xattr names for a file or directory.
+   * Only those xattr names which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to get extended attributes
+   * @return List<String> of the XAttr names of the file or directory
+   * @throws IOException
+   */
+  public List<String> listXAttrs(Path path) throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+            + " doesn't support listXAttrs");
+  }
+
+  /**
+   * Remove an xattr of a file or directory.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
+   * <p/>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
+   * @param path Path to remove extended attribute
+   * @param name xattr name
+   * @throws IOException
+   */
+  public void removeXAttr(Path path, String name) throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support removeXAttr");
+  }
+
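
Taken together, these methods give FileSystem a getfattr/setfattr-style surface. A minimal round-trip sketch (not part of the patch; the attribute name and value are made up, and a FileSystem implementation that overrides these methods, such as HDFS, is assumed, along with java.util.List and java.util.Map imports):

    // Sets, reads, lists and removes a single user-namespace attribute.
    static void xattrRoundTrip(FileSystem fs, Path p) throws IOException {
      fs.setXAttr(p, "user.origin", "ingest-job".getBytes("UTF-8"));
      byte[] value = fs.getXAttr(p, "user.origin");         // one attribute
      List<String> names = fs.listXAttrs(p);                // all visible names
      Map<String, byte[]> all = fs.getXAttrs(p, names);     // values for those names
      fs.removeXAttr(p, "user.origin");
    }
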
   // making it volatile to be able to do a double checked locking
   private volatile static boolean FILE_SYSTEMS_LOADED = false;
 
@@ -2610,7 +2809,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * be perceived as atomic with respect to other threads, which is all we
      * need.
      */
-    private static class StatisticsData {
+    public static class StatisticsData {
       volatile long bytesRead;
       volatile long bytesWritten;
       volatile int readOps;
@@ -2655,6 +2854,26 @@ public abstract class FileSystem extends Configured implements Closeable {
             + readOps + " read ops, " + largeReadOps + " large read ops, "
             + writeOps + " write ops";
       }
+      
+      public long getBytesRead() {
+        return bytesRead;
+      }
+      
+      public long getBytesWritten() {
+        return bytesWritten;
+      }
+      
+      public int getReadOps() {
+        return readOps;
+      }
+      
+      public int getLargeReadOps() {
+        return largeReadOps;
+      }
+      
+      public int getWriteOps() {
+        return writeOps;
+      }
     }
 
     private interface StatisticsAggregator<T> {
@@ -2713,7 +2932,7 @@ public abstract class FileSystem extends Configured implements Closeable {
     /**
      * Get or create the thread-local data associated with the current thread.
      */
-    private StatisticsData getThreadData() {
+    public StatisticsData getThreadStatistics() {
       StatisticsData data = threadData.get();
       if (data == null) {
         data = new StatisticsData(
@@ -2734,7 +2953,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param newBytes the additional bytes read
      */
     public void incrementBytesRead(long newBytes) {
-      getThreadData().bytesRead += newBytes;
+      getThreadStatistics().bytesRead += newBytes;
     }
     
     /**
@@ -2742,7 +2961,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param newBytes the additional bytes written
      */
     public void incrementBytesWritten(long newBytes) {
-      getThreadData().bytesWritten += newBytes;
+      getThreadStatistics().bytesWritten += newBytes;
     }
     
     /**
@@ -2750,7 +2969,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param count number of read operations
      */
     public void incrementReadOps(int count) {
-      getThreadData().readOps += count;
+      getThreadStatistics().readOps += count;
     }
 
     /**
@@ -2758,7 +2977,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param count number of large read operations
      */
     public void incrementLargeReadOps(int count) {
-      getThreadData().largeReadOps += count;
+      getThreadStatistics().largeReadOps += count;
     }
 
     /**
@@ -2766,7 +2985,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param count number of write operations
      */
     public void incrementWriteOps(int count) {
-      getThreadData().writeOps += count;
+      getThreadStatistics().writeOps += count;
     }
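
With StatisticsData and getThreadStatistics() now public, callers can read per-thread counters instead of only the aggregated totals. A small sketch (not part of the patch, purely illustrative):

    // Dumps the current thread's counters for every registered file system scheme.
    static void dumpThreadLocalStats() {
      for (FileSystem.Statistics stats : FileSystem.getAllStatistics()) {
        FileSystem.Statistics.StatisticsData data = stats.getThreadStatistics();
        System.out.println(stats.getScheme() + ": "
            + data.getBytesRead() + " bytes read, "
            + data.getWriteOps() + " write ops (current thread only)");
      }
    }
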
 
     /**

+ 52 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -23,14 +23,15 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
@@ -397,6 +398,12 @@ public class FilterFileSystem extends FileSystem {
     return fs.getFileStatus(f);
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    fs.access(path, mode);
+  }
+
   public void createSymlink(final Path target, final Path link,
       final boolean createParent) throws AccessControlException,
       FileAlreadyExistsException, FileNotFoundException,
@@ -427,7 +434,12 @@ public class FilterFileSystem extends FileSystem {
   public FileChecksum getFileChecksum(Path f) throws IOException {
     return fs.getFileChecksum(f);
   }
-  
+
+  @Override
+  public FileChecksum getFileChecksum(Path f, long length) throws IOException {
+    return fs.getFileChecksum(f, length);
+  }
+
   @Override
   public void setVerifyChecksum(boolean verifyChecksum) {
     fs.setVerifyChecksum(verifyChecksum);
@@ -538,4 +550,42 @@ public class FilterFileSystem extends FileSystem {
   public AclStatus getAclStatus(Path path) throws IOException {
     return fs.getAclStatus(path);
   }
+
+  @Override
+  public void setXAttr(Path path, String name, byte[] value)
+      throws IOException {
+    fs.setXAttr(path, name, value);
+  }
+
+  @Override
+  public void setXAttr(Path path, String name, byte[] value,
+      EnumSet<XAttrSetFlag> flag) throws IOException {
+    fs.setXAttr(path, name, value, flag);
+  }
+
+  @Override
+  public byte[] getXAttr(Path path, String name) throws IOException {
+    return fs.getXAttr(path, name);
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+    return fs.getXAttrs(path);
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+      throws IOException {
+    return fs.getXAttrs(path, names);
+  }
+
+  @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    return fs.listXAttrs(path);
+  }
+
+  @Override
+  public void removeXAttr(Path path, String name) throws IOException {
+    fs.removeXAttr(path, name);
+  }
 }

+ 47 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java

@@ -22,12 +22,14 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.security.AccessControlException;
@@ -118,6 +120,13 @@ public abstract class FilterFs extends AbstractFileSystem {
     return myFs.getFileStatus(f);
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    checkPath(path);
+    myFs.access(path, mode);
+  }
+
   @Override
   public FileStatus getFileLinkStatus(final Path f) 
     throws IOException, UnresolvedLinkException {
@@ -316,4 +325,42 @@ public abstract class FilterFs extends AbstractFileSystem {
   public AclStatus getAclStatus(Path path) throws IOException {
     return myFs.getAclStatus(path);
   }
+
+  @Override
+  public void setXAttr(Path path, String name, byte[] value)
+      throws IOException {
+    myFs.setXAttr(path, name, value);
+  }
+
+  @Override
+  public void setXAttr(Path path, String name, byte[] value,
+      EnumSet<XAttrSetFlag> flag) throws IOException {
+    myFs.setXAttr(path, name, value, flag);
+  }
+
+  @Override
+  public byte[] getXAttr(Path path, String name) throws IOException {
+    return myFs.getXAttr(path, name);
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+    return myFs.getXAttrs(path);
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+      throws IOException {
+    return myFs.getXAttrs(path, names);
+  }
+
+  @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    return myFs.listXAttrs(path);
+  }
+
+  @Override
+  public void removeXAttr(Path path, String name) throws IOException {
+    myFs.removeXAttr(path, name);
+  }
 }

+ 46 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java

@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.LinkedList;
 
+import org.apache.commons.lang.WordUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,6 +32,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;
+import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -40,6 +42,8 @@ public class FsShell extends Configured implements Tool {
   
   static final Log LOG = LogFactory.getLog(FsShell.class);
 
+  private static final int MAX_LINE_WIDTH = 80;
+
   private FileSystem fs;
   private Trash trash;
   protected CommandFactory commandFactory;
@@ -117,7 +121,7 @@ public class FsShell extends Configured implements Tool {
     public static final String NAME = "usage";
     public static final String USAGE = "[cmd ...]";
     public static final String DESCRIPTION =
-      "Displays the usage for given command or all commands if none\n" +
+      "Displays the usage for given command or all commands if none " +
       "is specified.";
     
     @Override
@@ -137,7 +141,7 @@ public class FsShell extends Configured implements Tool {
     public static final String NAME = "help";
     public static final String USAGE = "[cmd ...]";
     public static final String DESCRIPTION =
-      "Displays help for given command or all commands if none\n" +
+      "Displays help for given command or all commands if none " +
       "is specified.";
     
     @Override
@@ -197,7 +201,7 @@ public class FsShell extends Configured implements Tool {
       for (String name : commandFactory.getNames()) {
         Command instance = commandFactory.getInstance(name);
         if (!instance.isDeprecated()) {
-          System.out.println("\t[" + instance.getUsage() + "]");
+          out.println("\t[" + instance.getUsage() + "]");
           instances.add(instance);
         }
       }
@@ -217,20 +221,48 @@ public class FsShell extends Configured implements Tool {
     out.println(usagePrefix + " " + instance.getUsage());
   }
 
-  // TODO: will eventually auto-wrap the text, but this matches the expected
-  // output for the hdfs tests...
   private void printInstanceHelp(PrintStream out, Command instance) {
-    boolean firstLine = true;
+    out.println(instance.getUsage() + " :");
+    TableListing listing = null;
+    final String prefix = "  ";
     for (String line : instance.getDescription().split("\n")) {
-      String prefix;
-      if (firstLine) {
-        prefix = instance.getUsage() + ":\t";
-        firstLine = false;
-      } else {
-        prefix = "\t\t";
+      if (line.matches("^[ \t]*[-<].*$")) {
+        String[] segments = line.split(":");
+        if (segments.length == 2) {
+          if (listing == null) {
+            listing = createOptionTableListing();
+          }
+          listing.addRow(segments[0].trim(), segments[1].trim());
+          continue;
+        }
+      }
+
+      // Normal literal description.
+      if (listing != null) {
+        for (String listingLine : listing.toString().split("\n")) {
+          out.println(prefix + listingLine);
+        }
+        listing = null;
+      }
+
+      for (String descLine : WordUtils.wrap(
+          line, MAX_LINE_WIDTH, "\n", true).split("\n")) {
+        out.println(prefix + descLine);
+      }
+    }
+
+    if (listing != null) {
+      for (String listingLine : listing.toString().split("\n")) {
+        out.println(prefix + listingLine);
       }
-      System.out.println(prefix + line);
-    }    
+    }
+  }
+
+  // Creates a two-column table: the first column holds the command line
+  // option, the second column holds the option description.
+  private TableListing createOptionTableListing() {
+    return new TableListing.Builder().addField("").addField("", true)
+        .wrapWidth(MAX_LINE_WIDTH).build();
   }
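
The new help renderer treats description lines that start with '-' or '<' and contain exactly one ':' as option/description pairs, laying them out in the two-column TableListing above; every other line is word-wrapped at 80 columns via commons-lang WordUtils.wrap. A hypothetical DESCRIPTION (not part of the patch) written against that convention:

    // Hypothetical command description: option lines become table rows,
    // plain lines are wrapped at MAX_LINE_WIDTH.
    public static final String DESCRIPTION =
        "Frobnicates the given paths.\n" +
        "-R: apply the operation recursively.\n" +
        "<path>: one or more paths to process.";
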
 
   /**

+ 24 - 24
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java

@@ -63,18 +63,18 @@ public class FsShellPermissions extends FsCommand {
     public static final String NAME = "chmod";
     public static final String USAGE = "[-R] <MODE[,MODE]... | OCTALMODE> PATH...";
     public static final String DESCRIPTION =
-      "Changes permissions of a file.\n" +
-      "\tThis works similar to shell's chmod with a few exceptions.\n\n" +
-      "-R\tmodifies the files recursively. This is the only option\n" +
-      "\tcurrently supported.\n\n" +
-      "MODE\tMode is same as mode used for chmod shell command.\n" +
-      "\tOnly letters recognized are 'rwxXt'. E.g. +t,a+r,g-w,+rwx,o=r\n\n" +
-      "OCTALMODE Mode specifed in 3 or 4 digits. If 4 digits, the first may\n" +
-      "be 1 or 0 to turn the sticky bit on or off, respectively.  Unlike " +
-      "shell command, it is not possible to specify only part of the mode\n" +
-      "\tE.g. 754 is same as u=rwx,g=rx,o=r\n\n" +
-      "\tIf none of 'augo' is specified, 'a' is assumed and unlike\n" +
-      "\tshell command, no umask is applied.";
+      "Changes permissions of a file. " +
+      "This works similar to the shell's chmod command with a few exceptions.\n" +
+      "-R: modifies the files recursively. This is the only option" +
+      " currently supported.\n" +
+      "<MODE>: Mode is the same as mode used for the shell's command. " +
+      "The only letters recognized are 'rwxXt', e.g. +t,a+r,g-w,+rwx,o=r.\n" +
+      "<OCTALMODE>: Mode specifed in 3 or 4 digits. If 4 digits, the first " +
+      "may be 1 or 0 to turn the sticky bit on or off, respectively.  Unlike " +
+      "the shell command, it is not possible to specify only part of the " +
+      "mode, e.g. 754 is same as u=rwx,g=rx,o=r.\n\n" +
+      "If none of 'augo' is specified, 'a' is assumed and unlike the " +
+      "shell command, no umask is applied.";
 
     protected ChmodParser pp;
 
@@ -121,18 +121,18 @@ public class FsShellPermissions extends FsCommand {
     public static final String NAME = "chown";
     public static final String USAGE = "[-R] [OWNER][:[GROUP]] PATH...";
     public static final String DESCRIPTION =
-      "Changes owner and group of a file.\n" +
-      "\tThis is similar to shell's chown with a few exceptions.\n\n" +
-      "\t-R\tmodifies the files recursively. This is the only option\n" +
-      "\tcurrently supported.\n\n" +
-      "\tIf only owner or group is specified then only owner or\n" +
-      "\tgroup is modified.\n\n" +
-      "\tThe owner and group names may only consist of digits, alphabet,\n"+
-      "\tand any of " + allowedChars + ". The names are case sensitive.\n\n" +
-      "\tWARNING: Avoid using '.' to separate user name and group though\n" +
-      "\tLinux allows it. If user names have dots in them and you are\n" +
-      "\tusing local file system, you might see surprising results since\n" +
-      "\tshell command 'chown' is used for local files.";
+      "Changes owner and group of a file. " +
+      "This is similar to the shell's chown command with a few exceptions.\n" +
+      "-R: modifies the files recursively. This is the only option " +
+      "currently supported.\n\n" +
+      "If only the owner or group is specified, then only the owner or " +
+      "group is modified. " +
+      "The owner and group names may only consist of digits, alphabet, "+
+      "and any of " + allowedChars + ". The names are case sensitive.\n\n" +
+      "WARNING: Avoid using '.' to separate user name and group though " +
+      "Linux allows it. If user names have dots in them and you are " +
+      "using local file system, you might see surprising results since " +
+      "the shell command 'chown' is used for local files.";
 
     ///allows only "allowedChars" above in names for owner and group
     static private final Pattern chownPattern = Pattern.compile(

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -687,7 +687,7 @@ public class HarFileSystem extends FileSystem {
    * @return null since no checksum algorithm is implemented.
    */
   @Override
-  public FileChecksum getFileChecksum(Path f) {
+  public FileChecksum getFileChecksum(Path f, long length) {
     return null;
   }
 

+ 34 - 52
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java

@@ -22,10 +22,13 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InputStreamReader;
+import java.io.StringReader;
 import java.util.Arrays;
 
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.Shell.ExitCodeException;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 
 /**
  * Class for creating hardlinks.
@@ -89,7 +92,6 @@ public class HardLink {
      *            to the source directory
      * @param linkDir - target directory where the hardlinks will be put
      * @return - an array of Strings suitable for use as a single shell command
-     *            with {@link Runtime.exec()}
      * @throws IOException - if any of the file or path names misbehave
      */
     abstract String[] linkMult(String[] fileBaseNames, File linkDir) 
@@ -230,17 +232,17 @@ public class HardLink {
     //package-private ("default") access instead of "private" to assist 
     //unit testing (sort of) on non-Win servers
 
+    static String CMD_EXE = "cmd.exe";
     static String[] hardLinkCommand = {
                         Shell.WINUTILS,"hardlink","create", null, null};
     static String[] hardLinkMultPrefix = {
-                        "cmd","/q","/c","for", "%f", "in", "("};
+        CMD_EXE, "/q", "/c", "for", "%f", "in", "("};
     static String   hardLinkMultDir = "\\%f";
     static String[] hardLinkMultSuffix = {
-                        ")", "do", Shell.WINUTILS, "hardlink", "create", null,
-                        "%f", "1>NUL"};
+        ")", "do", Shell.WINUTILS, "hardlink", "create", null,
+        "%f"};
     static String[] getLinkCountCommand = {
-                        Shell.WINUTILS, "hardlink",
-                        "stat", null};
+        Shell.WINUTILS, "hardlink", "stat", null};
     //Windows guarantees only 8K - 1 bytes cmd length.
     //Subtract another 64b to allow for Java 'exec' overhead
     static final int maxAllowedCmdArgLength = 8*1024 - 65;
@@ -278,7 +280,7 @@ public class HardLink {
       System.arraycopy(hardLinkMultSuffix, 0, buf, mark, 
                        hardLinkMultSuffix.length);
       mark += hardLinkMultSuffix.length;
-      buf[mark - 3] = td;
+      buf[mark - 2] = td;
       return buf;
     }
     
@@ -310,8 +312,8 @@ public class HardLink {
                linkDir.getCanonicalPath().length();
       //add the fixed overhead of the hardLinkMult command 
       //(prefix, suffix, and Dir suffix)
-      sum += ("cmd.exe /q /c for %f in ( ) do "
-              + Shell.WINUTILS + " hardlink create \\%f %f 1>NUL ").length();
+      sum += (CMD_EXE + " /q /c for %f in ( ) do "
+              + Shell.WINUTILS + " hardlink create \\%f %f").length();
       return sum;
     }
     
@@ -379,21 +381,14 @@ public class HardLink {
     }
 	  // construct and execute shell command
     String[] hardLinkCommand = getHardLinkCommand.linkOne(file, linkName);
-    Process process = Runtime.getRuntime().exec(hardLinkCommand);
+    ShellCommandExecutor shexec = new ShellCommandExecutor(hardLinkCommand);
     try {
-      if (process.waitFor() != 0) {
-        String errMsg = new BufferedReader(new InputStreamReader(
-            process.getInputStream())).readLine();
-        if (errMsg == null)  errMsg = "";
-        String inpMsg = new BufferedReader(new InputStreamReader(
-            process.getErrorStream())).readLine();
-        if (inpMsg == null)  inpMsg = "";
-        throw new IOException(errMsg + inpMsg);
-      }
-    } catch (InterruptedException e) {
-      throw new IOException(e);
-    } finally {
-      process.destroy();
+      shexec.execute();
+    } catch (ExitCodeException e) {
+      throw new IOException("Failed to execute command " +
+          Arrays.toString(hardLinkCommand) +
+          "; command output: \"" + shexec.getOutput() + "\"" +
+          "; WrappedException: \"" + e.getMessage() + "\"");
     }
   }
 
@@ -466,22 +461,12 @@ public class HardLink {
     // construct and execute shell command
     String[] hardLinkCommand = getHardLinkCommand.linkMult(fileBaseNames, 
         linkDir);
-    Process process = Runtime.getRuntime().exec(hardLinkCommand, null, 
-        parentDir);
+    ShellCommandExecutor shexec = new ShellCommandExecutor(hardLinkCommand,
+      parentDir, null, 0L);
     try {
-      if (process.waitFor() != 0) {
-        String errMsg = new BufferedReader(new InputStreamReader(
-            process.getInputStream())).readLine();
-        if (errMsg == null)  errMsg = "";
-        String inpMsg = new BufferedReader(new InputStreamReader(
-            process.getErrorStream())).readLine();
-        if (inpMsg == null)  inpMsg = "";
-        throw new IOException(errMsg + inpMsg);
-      }
-    } catch (InterruptedException e) {
-      throw new IOException(e);
-    } finally {
-      process.destroy();
+      shexec.execute();
+    } catch (ExitCodeException e) {
+      throw new IOException(shexec.getOutput() + e.getMessage());
     }
     return callCount;
   }
@@ -504,17 +489,13 @@ public class HardLink {
     String errMsg = null;
     int exitValue = -1;
     BufferedReader in = null;
-    BufferedReader err = null;
 
-    Process process = Runtime.getRuntime().exec(cmd);
+    ShellCommandExecutor shexec = new ShellCommandExecutor(cmd);
     try {
-      exitValue = process.waitFor();
-      in = new BufferedReader(new InputStreamReader(
-                                  process.getInputStream()));
+      shexec.execute();
+      in = new BufferedReader(new StringReader(shexec.getOutput()));
       inpMsg = in.readLine();
-      err = new BufferedReader(new InputStreamReader(
-                                   process.getErrorStream()));
-      errMsg = err.readLine();
+      exitValue = shexec.getExitCode();
       if (inpMsg == null || exitValue != 0) {
         throw createIOException(fileName, inpMsg, errMsg, exitValue, null);
       }
@@ -524,14 +505,15 @@ public class HardLink {
       } else {
         return Integer.parseInt(inpMsg);
       }
-    } catch (NumberFormatException e) {
+    } catch (ExitCodeException e) {
+      inpMsg = shexec.getOutput();
+      errMsg = e.getMessage();
+      exitValue = e.getExitCode();
       throw createIOException(fileName, inpMsg, errMsg, exitValue, e);
-    } catch (InterruptedException e) {
+    } catch (NumberFormatException e) {
       throw createIOException(fileName, inpMsg, errMsg, exitValue, e);
     } finally {
-      process.destroy();
-      if (in != null) in.close();
-      if (err != null) err.close();
+      IOUtils.closeStream(in);
     }
   }
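
The refactor replaces raw Runtime.exec() handling with Shell.ShellCommandExecutor, which buffers stdout and surfaces a non-zero exit status as an ExitCodeException. The general pattern, sketched with an arbitrary command (not part of the patch; java.util.Arrays is assumed to be imported):

    // Runs a command and returns its captured output, wrapping failures in IOException.
    static String runAndCapture(String[] cmd) throws IOException {
      ShellCommandExecutor shexec = new ShellCommandExecutor(cmd);
      try {
        shexec.execute();                   // throws ExitCodeException on non-zero exit
        return shexec.getOutput();          // stdout captured by the executor
      } catch (ExitCodeException e) {
        throw new IOException("Command " + Arrays.toString(cmd)
            + " failed with exit code " + e.getExitCode()
            + ": " + shexec.getOutput(), e);
      }
    }
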
   

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathIOException.java

@@ -40,7 +40,7 @@ public class PathIOException extends IOException {
    *  @param path for the exception
    */
   public PathIOException(String path) {
-    this(path, EIO, null);
+    this(path, EIO);
   }
 
   /**
@@ -59,7 +59,8 @@ public class PathIOException extends IOException {
   * @param error custom string to use as the error text
    */
   public PathIOException(String path, String error) {
-    this(path, error, null);
+    super(error);
+    this.path = path;
   }
 
   protected PathIOException(String path, String error, Throwable cause) {

+ 16 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -23,6 +23,7 @@ import com.google.common.annotations.VisibleForTesting;
 
 import java.io.BufferedOutputStream;
 import java.io.DataOutput;
+import java.io.EOFException;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -105,6 +106,10 @@ public class RawLocalFileSystem extends FileSystem {
     
     @Override
     public void seek(long pos) throws IOException {
+      if (pos < 0) {
+        throw new EOFException(
+          FSExceptionMessages.NEGATIVE_SEEK);
+      }
       fis.getChannel().position(pos);
       this.position = pos;
     }
@@ -256,7 +261,7 @@ public class RawLocalFileSystem extends FileSystem {
       boolean createParent, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
     if (exists(f) && !overwrite) {
-      throw new IOException("File already exists: "+f);
+      throw new FileAlreadyExistsException("File already exists: " + f);
     }
     Path parent = f.getParent();
     if (parent != null && !mkdirs(parent)) {
@@ -272,7 +277,7 @@ public class RawLocalFileSystem extends FileSystem {
       EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
     if (exists(f) && !flags.contains(CreateFlag.OVERWRITE)) {
-      throw new IOException("File already exists: "+f);
+      throw new FileAlreadyExistsException("File already exists: " + f);
     }
     return new FSDataOutputStream(new BufferedOutputStream(
         new LocalFSFileOutputStream(f, false), bufferSize), statistics);
@@ -344,6 +349,10 @@ public class RawLocalFileSystem extends FileSystem {
   @Override
   public boolean delete(Path p, boolean recursive) throws IOException {
     File f = pathToFile(p);
+    if (!f.exists()) {
+      //no path, return false "nothing to delete"
+      return false;
+    }
     if (f.isFile()) {
       return f.delete();
     } else if (!recursive && f.isDirectory() && 
@@ -412,10 +421,14 @@ public class RawLocalFileSystem extends FileSystem {
     if(parent != null) {
       File parent2f = pathToFile(parent);
       if(parent2f != null && parent2f.exists() && !parent2f.isDirectory()) {
-        throw new FileAlreadyExistsException("Parent path is not a directory: " 
+        throw new ParentNotDirectoryException("Parent path is not a directory: "
             + parent);
       }
     }
+    if (p2f.exists() && !p2f.isDirectory()) {
+      throw new FileNotFoundException("Destination exists" +
+              " and is not a directory: " + p2f.getCanonicalPath());
+    }
     return (parent == null || mkdirs(parent)) &&
       (p2f.mkdir() || p2f.isDirectory());
   }
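
These RawLocalFileSystem changes tighten the contract to match other FileSystem implementations: a negative seek fails immediately, creating an existing file raises FileAlreadyExistsException, deleting a missing path returns false, and mkdirs over an existing file fails. A test-style sketch of the seek rule (not part of the patch, illustrative only):

    // Demonstrates that a negative seek on the local file system is rejected.
    static void checkNegativeSeek(FileSystem localFs, Path existing) throws IOException {
      FSDataInputStream in = localFs.open(existing);
      try {
        in.seek(-1);                        // now fails fast
        throw new AssertionError("negative seek should have failed");
      } catch (EOFException expected) {
        // expected: FSExceptionMessages.NEGATIVE_SEEK
      } finally {
        in.close();
      }
    }
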

+ 7 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java

@@ -128,6 +128,8 @@ public class Stat extends Shell {
           " link " + original);
     }
     // 6,symbolic link,6,1373584236,1373584236,lrwxrwxrwx,andrew,andrew,`link' -> `target'
+    // OR
+    // 6,symbolic link,6,1373584236,1373584236,lrwxrwxrwx,andrew,andrew,'link' -> 'target'
     StringTokenizer tokens = new StringTokenizer(line, ",");
     try {
       long length = Long.parseLong(tokens.nextToken());
@@ -147,18 +149,17 @@ public class Stat extends Shell {
       String group = tokens.nextToken();
       String symStr = tokens.nextToken();
       // 'notalink'
-      // 'link' -> `target'
+      // `link' -> `target' OR 'link' -> 'target'
       // '' -> ''
       Path symlink = null;
-      StringTokenizer symTokens = new StringTokenizer(symStr, "`");
-      symTokens.nextToken();
+      String[] parts = symStr.split(" -> ");
       try {
-        String target = symTokens.nextToken();
-        target = target.substring(0, target.length()-1);
+        String target = parts[1];
+        target = target.substring(1, target.length()-1);
         if (!target.isEmpty()) {
           symlink = new Path(target);
         }
-      } catch (NoSuchElementException e) {
+      } catch (ArrayIndexOutOfBoundsException e) {
         // null if not a symlink
       }
       // Set stat

+ 121 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java

@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+import org.apache.commons.codec.DecoderException;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.codec.binary.Hex;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * The value of an <code>XAttr</code> is a byte[]. This class converts a
+ * byte[] value to a string representation and back again. The string form
+ * is convenient for display and input, for example when showing values in a
+ * shell or JSON response, or when accepting them as HTTP or shell parameters.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public enum XAttrCodec {
+  /**
+   * Value encoded as text 
+   * string is enclosed in double quotes (\").
+   */
+  TEXT,
+  
+  /**
+   * Value encoded as hexadecimal string 
+   * is prefixed with 0x.
+   */
+  HEX,
+  
+  /**
+   * Value encoded as base64 string 
+   * is prefixed with 0s.
+   */
+  BASE64;
+  
+  private static final String HEX_PREFIX = "0x";
+  private static final String BASE64_PREFIX = "0s";
+  private static final Base64 base64 = new Base64(0);
+  
+  /**
+   * Decode the string representation of a value, detecting how it is
+   * encoded. If the given string begins with 0x or 0X, it is interpreted
+   * as a hexadecimal number. If it begins with 0s or 0S, base64 encoding
+   * is expected. If it is enclosed in double quotes, the inner string is
+   * treated as text. Otherwise the whole string is treated as text.
+   * @param value string representation of the value.
+   * @return byte[] the value
+   * @throws IOException
+   */
+  public static byte[] decodeValue(String value) throws IOException {
+    byte[] result = null;
+    if (value != null) {
+      if (value.length() >= 2) {
+        String en = value.substring(0, 2);
+        if (value.startsWith("\"") && value.endsWith("\"")) {
+          value = value.substring(1, value.length()-1);
+          result = value.getBytes("utf-8");
+        } else if (en.equalsIgnoreCase(HEX_PREFIX)) {
+          value = value.substring(2, value.length());
+          try {
+            result = Hex.decodeHex(value.toCharArray());
+          } catch (DecoderException e) {
+            throw new IOException(e);
+          }
+        } else if (en.equalsIgnoreCase(BASE64_PREFIX)) {
+          value = value.substring(2, value.length());
+          result = base64.decode(value);
+        }
+      }
+      if (result == null) {
+        result = value.getBytes("utf-8");
+      }
+    }
+    return result;
+  }
+  
+  /**
+   * Encode byte[] value to string representation with encoding. 
+   * Values encoded as text strings are enclosed in double quotes (\"), 
+   * while strings encoded as hexadecimal and base64 are prefixed with 
+   * 0x and 0s, respectively.
+   * @param value byte[] value
+   * @param encoding the encoding to apply (TEXT, HEX or BASE64)
+   * @return String string representation of value
+   * @throws IOException
+   */
+  public static String encodeValue(byte[] value, XAttrCodec encoding) 
+      throws IOException {
+    Preconditions.checkNotNull(value, "Value can not be null.");
+    if (encoding == HEX) {
+      return HEX_PREFIX + Hex.encodeHexString(value);
+    } else if (encoding == BASE64) {
+      return BASE64_PREFIX + base64.encodeToString(value);
+    } else {
+      return "\"" + new String(value, "utf-8") + "\"";
+    }
+  }
+}
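
A quick sketch of the three encodings (not part of the patch; the values are made up):

    // Illustrative round trips through the TEXT, HEX and BASE64 encodings.
    static void codecExamples() throws IOException {
      byte[] fromHex  = XAttrCodec.decodeValue("0x0123");      // hex, "0x" prefix
      byte[] fromB64  = XAttrCodec.decodeValue("0sMTIz");      // base64, "0s" prefix
      byte[] fromText = XAttrCodec.decodeValue("\"hello\"");   // quoted text
      String hex  = XAttrCodec.encodeValue(fromHex,  XAttrCodec.HEX);     // "0x0123"
      String b64  = XAttrCodec.encodeValue(fromB64,  XAttrCodec.BASE64);  // "0s..."
      String text = XAttrCodec.encodeValue(fromText, XAttrCodec.TEXT);    // "\"hello\""
    }
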

+ 71 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrSetFlag.java

@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public enum XAttrSetFlag {
+  /**
+   * Create a new xattr.
+   * If the xattr exists already, exception will be thrown.
+   */
+  CREATE((short) 0x01),
+
+  /**
+   * Replace an existing xattr.
+   * If the xattr does not exist, an exception will be thrown.
+   */
+  REPLACE((short) 0x02);
+
+  private final short flag;
+
+  private XAttrSetFlag(short flag) {
+    this.flag = flag;
+  }
+
+  short getFlag() {
+    return flag;
+  }
+
+  public static void validate(String xAttrName, boolean xAttrExists,
+      EnumSet<XAttrSetFlag> flag) throws IOException {
+    if (flag == null || flag.isEmpty()) {
+      throw new HadoopIllegalArgumentException("A flag must be specified.");
+    }
+
+    if (xAttrExists) {
+      if (!flag.contains(REPLACE)) {
+        throw new IOException("XAttr: " + xAttrName +
+            " already exists. The REPLACE flag must be specified.");
+      }
+    } else {
+      if (!flag.contains(CREATE)) {
+        throw new IOException("XAttr: " + xAttrName +
+            " does not exist. The CREATE flag must be specified.");
+      }
+    }
+  }
+}
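
validate() requires CREATE when the attribute does not yet exist and REPLACE when it does; the convenience setXAttr(path, name, value) overload passes both flags, so it behaves like an upsert. A sketch of the stricter forms (not part of the patch; the attribute name is made up, and java.util.EnumSet plus an xattr-capable FileSystem are assumed):

    // Create-only and replace-only writes of the same attribute.
    static void strictXAttrWrites(FileSystem fs, Path p, byte[] value) throws IOException {
      // Fails with an IOException if user.checksum already exists:
      fs.setXAttr(p, "user.checksum", value, EnumSet.of(XAttrSetFlag.CREATE));
      // Fails with an IOException if user.checksum does not exist:
      fs.setXAttr(p, "user.checksum", value, EnumSet.of(XAttrSetFlag.REPLACE));
    }
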

+ 80 - 37
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.ftp;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.net.ConnectException;
 import java.net.URI;
 
 import org.apache.commons.logging.Log;
@@ -33,11 +34,14 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Progressable;
 
 /**
@@ -56,6 +60,12 @@ public class FTPFileSystem extends FileSystem {
   public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024;
 
   public static final int DEFAULT_BLOCK_SIZE = 4 * 1024;
+  public static final String FS_FTP_USER_PREFIX = "fs.ftp.user.";
+  public static final String FS_FTP_HOST = "fs.ftp.host";
+  public static final String FS_FTP_HOST_PORT = "fs.ftp.host.port";
+  public static final String FS_FTP_PASSWORD_PREFIX = "fs.ftp.password.";
+  public static final String E_SAME_DIRECTORY_ONLY =
+      "only same directory renames are supported";
 
   private URI uri;
 
@@ -75,11 +85,11 @@ public class FTPFileSystem extends FileSystem {
     super.initialize(uri, conf);
     // get host information from uri (overrides info in conf)
     String host = uri.getHost();
-    host = (host == null) ? conf.get("fs.ftp.host", null) : host;
+    host = (host == null) ? conf.get(FS_FTP_HOST, null) : host;
     if (host == null) {
       throw new IOException("Invalid host specified");
     }
-    conf.set("fs.ftp.host", host);
+    conf.set(FS_FTP_HOST, host);
 
     // get port information from uri, (overrides info in conf)
     int port = uri.getPort();
@@ -96,11 +106,11 @@ public class FTPFileSystem extends FileSystem {
       }
     }
     String[] userPasswdInfo = userAndPassword.split(":");
-    conf.set("fs.ftp.user." + host, userPasswdInfo[0]);
+    conf.set(FS_FTP_USER_PREFIX + host, userPasswdInfo[0]);
     if (userPasswdInfo.length > 1) {
-      conf.set("fs.ftp.password." + host, userPasswdInfo[1]);
+      conf.set(FS_FTP_PASSWORD_PREFIX + host, userPasswdInfo[1]);
     } else {
-      conf.set("fs.ftp.password." + host, null);
+      conf.set(FS_FTP_PASSWORD_PREFIX + host, null);
     }
     setConf(conf);
     this.uri = uri;
@@ -115,23 +125,24 @@ public class FTPFileSystem extends FileSystem {
   private FTPClient connect() throws IOException {
     FTPClient client = null;
     Configuration conf = getConf();
-    String host = conf.get("fs.ftp.host");
-    int port = conf.getInt("fs.ftp.host.port", FTP.DEFAULT_PORT);
-    String user = conf.get("fs.ftp.user." + host);
-    String password = conf.get("fs.ftp.password." + host);
+    String host = conf.get(FS_FTP_HOST);
+    int port = conf.getInt(FS_FTP_HOST_PORT, FTP.DEFAULT_PORT);
+    String user = conf.get(FS_FTP_USER_PREFIX + host);
+    String password = conf.get(FS_FTP_PASSWORD_PREFIX + host);
     client = new FTPClient();
     client.connect(host, port);
     int reply = client.getReplyCode();
     if (!FTPReply.isPositiveCompletion(reply)) {
-      throw new IOException("Server - " + host
-          + " refused connection on port - " + port);
+      throw NetUtils.wrapException(host, port,
+                   NetUtils.UNKNOWN_HOST, 0,
+                   new ConnectException("Server response " + reply));
     } else if (client.login(user, password)) {
       client.setFileTransferMode(FTP.BLOCK_TRANSFER_MODE);
       client.setFileType(FTP.BINARY_FILE_TYPE);
       client.setBufferSize(DEFAULT_BUFFER_SIZE);
     } else {
       throw new IOException("Login failed on server - " + host + ", port - "
-          + port);
+          + port + " as user '" + user + "'");
     }
 
     return client;
@@ -179,7 +190,7 @@ public class FTPFileSystem extends FileSystem {
     FileStatus fileStat = getFileStatus(client, absolute);
     if (fileStat.isDirectory()) {
       disconnect(client);
-      throw new IOException("Path " + file + " is a directory.");
+      throw new FileNotFoundException("Path " + file + " is a directory.");
     }
     client.allocate(bufferSize);
     Path parent = absolute.getParent();
@@ -214,12 +225,18 @@ public class FTPFileSystem extends FileSystem {
     final FTPClient client = connect();
     Path workDir = new Path(client.printWorkingDirectory());
     Path absolute = makeAbsolute(workDir, file);
-    if (exists(client, file)) {
-      if (overwrite) {
-        delete(client, file);
+    FileStatus status;
+    try {
+      status = getFileStatus(client, file);
+    } catch (FileNotFoundException fnfe) {
+      status = null;
+    }
+    if (status != null) {
+      if (overwrite && !status.isDirectory()) {
+        delete(client, file, false);
       } else {
         disconnect(client);
-        throw new IOException("File already exists: " + file);
+        throw new FileAlreadyExistsException("File already exists: " + file);
       }
     }
     
@@ -272,14 +289,13 @@ public class FTPFileSystem extends FileSystem {
    * Convenience method, so that we don't open a new connection when using this
    * method from within another method. Otherwise every API invocation incurs
    * the overhead of opening/closing a TCP connection.
+   * @throws IOException on IO problems other than FileNotFoundException
    */
-  private boolean exists(FTPClient client, Path file) {
+  private boolean exists(FTPClient client, Path file) throws IOException {
     try {
       return getFileStatus(client, file) != null;
     } catch (FileNotFoundException fnfe) {
       return false;
-    } catch (IOException ioe) {
-      throw new FTPException("Failed to get file status", ioe);
     }
   }
 
@@ -294,12 +310,6 @@ public class FTPFileSystem extends FileSystem {
     }
   }
 
-  /** @deprecated Use delete(Path, boolean) instead */
-  @Deprecated
-  private boolean delete(FTPClient client, Path file) throws IOException {
-    return delete(client, file, false);
-  }
-
   /**
    * Convenience method, so that we don't open a new connection when using this
    * method from within another method. Otherwise every API invocation incurs
@@ -310,9 +320,14 @@ public class FTPFileSystem extends FileSystem {
     Path workDir = new Path(client.printWorkingDirectory());
     Path absolute = makeAbsolute(workDir, file);
     String pathName = absolute.toUri().getPath();
-    FileStatus fileStat = getFileStatus(client, absolute);
-    if (fileStat.isFile()) {
-      return client.deleteFile(pathName);
+    try {
+      FileStatus fileStat = getFileStatus(client, absolute);
+      if (fileStat.isFile()) {
+        return client.deleteFile(pathName);
+      }
+    } catch (FileNotFoundException e) {
+      //the file is not there
+      return false;
     }
     FileStatus[] dirEntries = listStatus(client, absolute);
     if (dirEntries != null && dirEntries.length > 0 && !(recursive)) {
@@ -491,7 +506,7 @@ public class FTPFileSystem extends FileSystem {
         created = created && client.makeDirectory(pathName);
       }
     } else if (isFile(client, absolute)) {
-      throw new IOException(String.format(
+      throw new ParentNotDirectoryException(String.format(
           "Can't make directory for path %s since it is a file.", absolute));
     }
     return created;
@@ -527,6 +542,23 @@ public class FTPFileSystem extends FileSystem {
     }
   }
 
+  /**
+   * Probe for a path being a parent of another
+   * @param parent parent path
+   * @param child possible child path
+   * @return true if the parent's path matches the start of the child's
+   */
+  private boolean isParentOf(Path parent, Path child) {
+    URI parentURI = parent.toUri();
+    String parentPath = parentURI.getPath();
+    if (!parentPath.endsWith("/")) {
+      parentPath += "/";
+    }
+    URI childURI = child.toUri();
+    String childPath = childURI.getPath();
+    return childPath.startsWith(parentPath);
+  }
+
   /**
    * Convenience method, so that we don't open a new connection when using this
    * method from within another method. Otherwise every API invocation incurs
@@ -544,20 +576,31 @@ public class FTPFileSystem extends FileSystem {
     Path absoluteSrc = makeAbsolute(workDir, src);
     Path absoluteDst = makeAbsolute(workDir, dst);
     if (!exists(client, absoluteSrc)) {
-      throw new IOException("Source path " + src + " does not exist");
+      throw new FileNotFoundException("Source path " + src + " does not exist");
+    }
+    if (isDirectory(absoluteDst)) {
+      // destination is a directory: rename goes underneath it with the
+      // source name
+      absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
     }
     if (exists(client, absoluteDst)) {
-      throw new IOException("Destination path " + dst
-          + " already exist, cannot rename!");
+      throw new FileAlreadyExistsException("Destination path " + dst
+          + " already exists");
     }
     String parentSrc = absoluteSrc.getParent().toUri().toString();
     String parentDst = absoluteDst.getParent().toUri().toString();
-    String from = src.getName();
-    String to = dst.getName();
+    if (isParentOf(absoluteSrc, absoluteDst)) {
+      throw new IOException("Cannot rename " + absoluteSrc + " under itself"
+      + " : "+ absoluteDst);
+    }
+
     if (!parentSrc.equals(parentDst)) {
-      throw new IOException("Cannot rename parent(source): " + parentSrc
-          + ", parent(destination):  " + parentDst);
+      throw new IOException("Cannot rename source: " + absoluteSrc
+          + " to " + absoluteDst
+          + " -"+ E_SAME_DIRECTORY_ONLY);
     }
+    String from = absoluteSrc.getName();
+    String to = absoluteDst.getName();
     client.changeWorkingDirectory(parentSrc);
     boolean renamed = client.rename(from, to);
     return renamed;
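
With the configuration keys now exposed as constants, a client can be wired up explicitly; a sketch (not part of the patch; the host and credentials are placeholders, java.net.URI is assumed to be imported, and storing passwords in plain configuration is shown only for illustration):

    // Opens an FTPFileSystem against a placeholder host using the new key constants.
    static FileSystem openFtp() throws IOException {
      Configuration conf = new Configuration();
      conf.set(FTPFileSystem.FS_FTP_HOST, "ftp.example.com");
      conf.set(FTPFileSystem.FS_FTP_HOST_PORT, "21");
      conf.set(FTPFileSystem.FS_FTP_USER_PREFIX + "ftp.example.com", "alice");
      conf.set(FTPFileSystem.FS_FTP_PASSWORD_PREFIX + "ftp.example.com", "secret");
      return FileSystem.get(URI.create("ftp://ftp.example.com/"), conf);
    }

The rename changes also mean that a destination which is an existing directory now receives the source under its own name, while cross-directory renames still fail with E_SAME_DIRECTORY_ONLY.
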

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java

@@ -103,7 +103,7 @@ public class FTPInputStream extends FSInputStream {
   @Override
   public synchronized void close() throws IOException {
     if (closed) {
-      throw new IOException("Stream closed");
+      return;
     }
     super.close();
     closed = true;

+ 15 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java

@@ -278,7 +278,7 @@ public class AclEntry {
     }
 
     if (includePermission) {
-      if (split.length < index) {
+      if (split.length <= index) {
         throw new HadoopIllegalArgumentException("Invalid <aclSpec> : "
             + aclStr);
       }
@@ -298,4 +298,18 @@ public class AclEntry {
     AclEntry aclEntry = builder.build();
     return aclEntry;
   }
+
+  /**
+   * Convert a List of AclEntries into a string - the reverse of parseAclSpec.
+   * @param aclSpec List of AclEntries to convert
+   * @return String representation of aclSpec
+   */
+  public static String aclSpecToString(List<AclEntry> aclSpec) {
+    StringBuilder buf = new StringBuilder();
+    for ( AclEntry e : aclSpec ) {
+      buf.append(e.toString());
+      buf.append(",");
+    }
+    return buf.substring(0, buf.length()-1);  // remove last ,
+  }
 }
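
aclSpecToString is the inverse of the existing parseAclSpec, which makes it easy to echo a parsed spec back to the user. A small sketch (not part of the patch; the spec string is illustrative and java.util.List is assumed):

    // Parses an ACL spec and renders it back to text.
    static String normalizeAclSpec() {
      List<AclEntry> spec =
          AclEntry.parseAclSpec("user:alice:rwx,group::r-x,other::r--", true);
      return AclEntry.aclSpecToString(spec);   // roughly the original spec text
    }
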

+ 134 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java

@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.permission;
+
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import com.google.common.collect.Lists;
+
+/**
+ * AclUtil contains utility methods for manipulating ACLs.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public final class AclUtil {
+
+  /**
+   * Given permissions and extended ACL entries, returns the full logical ACL.
+   *
+   * @param perm FsPermission containing permissions
+   * @param entries List<AclEntry> containing extended ACL entries
+   * @return List<AclEntry> containing full logical ACL
+   */
+  public static List<AclEntry> getAclFromPermAndEntries(FsPermission perm,
+      List<AclEntry> entries) {
+    List<AclEntry> acl = Lists.newArrayListWithCapacity(entries.size() + 3);
+
+    // Owner entry implied by owner permission bits.
+    acl.add(new AclEntry.Builder()
+      .setScope(AclEntryScope.ACCESS)
+      .setType(AclEntryType.USER)
+      .setPermission(perm.getUserAction())
+      .build());
+
+    // All extended access ACL entries.
+    boolean hasAccessAcl = false;
+    Iterator<AclEntry> entryIter = entries.iterator();
+    AclEntry curEntry = null;
+    while (entryIter.hasNext()) {
+      curEntry = entryIter.next();
+      if (curEntry.getScope() == AclEntryScope.DEFAULT) {
+        break;
+      }
+      hasAccessAcl = true;
+      acl.add(curEntry);
+    }
+
+    // Mask entry implied by group permission bits, or group entry if there is
+    // no access ACL (only default ACL).
+    acl.add(new AclEntry.Builder()
+      .setScope(AclEntryScope.ACCESS)
+      .setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP)
+      .setPermission(perm.getGroupAction())
+      .build());
+
+    // Other entry implied by other bits.
+    acl.add(new AclEntry.Builder()
+      .setScope(AclEntryScope.ACCESS)
+      .setType(AclEntryType.OTHER)
+      .setPermission(perm.getOtherAction())
+      .build());
+
+    // Default ACL entries.
+    if (curEntry != null && curEntry.getScope() == AclEntryScope.DEFAULT) {
+      acl.add(curEntry);
+      while (entryIter.hasNext()) {
+        acl.add(entryIter.next());
+      }
+    }
+
+    return acl;
+  }
+
+  /**
+   * Translates the given permission bits to the equivalent minimal ACL.
+   *
+   * @param perm FsPermission to translate
+   * @return List<AclEntry> containing exactly 3 entries representing the owner,
+   *   group and other permissions
+   */
+  public static List<AclEntry> getMinimalAcl(FsPermission perm) {
+    return Lists.newArrayList(
+      new AclEntry.Builder()
+        .setScope(AclEntryScope.ACCESS)
+        .setType(AclEntryType.USER)
+        .setPermission(perm.getUserAction())
+        .build(),
+      new AclEntry.Builder()
+        .setScope(AclEntryScope.ACCESS)
+        .setType(AclEntryType.GROUP)
+        .setPermission(perm.getGroupAction())
+        .build(),
+      new AclEntry.Builder()
+        .setScope(AclEntryScope.ACCESS)
+        .setType(AclEntryType.OTHER)
+        .setPermission(perm.getOtherAction())
+        .build());
+  }
+
+  /**
+   * Checks if the given entries represent a minimal ACL (contains exactly 3
+   * entries).
+   *
+   * @param entries List<AclEntry> entries to check
+   * @return boolean true if the entries represent a minimal ACL
+   */
+  public static boolean isMinimalAcl(List<AclEntry> entries) {
+    return entries.size() == 3;
+  }
+
+  /**
+   * There is no reason to instantiate this class.
+   */
+  private AclUtil() {
+  }
+}
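
getMinimalAcl expands plain permission bits into the equivalent three-entry ACL, and isMinimalAcl is simply the size-three check. For example (not part of the patch; the permission value is chosen for illustration, java.util.List assumed):

    // Expands 0750 into user::rwx, group::r-x, other::--- and checks minimality.
    static boolean minimalAclExample() {
      FsPermission perm = new FsPermission((short) 0750);
      List<AclEntry> acl = AclUtil.getMinimalAcl(perm);
      return AclUtil.isMinimalAcl(acl);        // true: exactly three entries
    }
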

+ 21 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java

@@ -158,6 +158,17 @@ public class FsPermission implements Writable {
     return (short)s;
   }
 
+  /**
+   * Encodes the object to a short.  Unlike {@link #toShort()}, this method may
+   * return values outside the fixed range 00000 - 01777 if extended features
+   * are encoded into this permission, such as the ACL bit.
+   *
+   * @return short extended short representation of this permission
+   */
+  public short toExtendedShort() {
+    return toShort();
+  }
+
   @Override
   public boolean equals(Object obj) {
     if (obj instanceof FsPermission) {
@@ -273,6 +284,16 @@ public class FsPermission implements Writable {
     return stickyBit;
   }
 
+  /**
+   * Returns true if there is also an ACL (access control list).
+   *
+   * @return boolean true if there is also an ACL (access control list).
+   */
+  public boolean getAclBit() {
+    // File system subclasses that support the ACL bit would override this.
+    return false;
+  }
+
   /** Set the user file creation mask (umask) */
   public static void setUMask(Configuration conf, FsPermission umask) {
     conf.set(UMASK_LABEL, String.format("%1$03o", umask.toShort()));
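
toExtendedShort() and getAclBit() are hooks for subclasses; the base class keeps the existing behaviour. A hedged sketch (not part of the patch) of how a listing tool might consume the ACL bit, mirroring the '+' suffix that ls -l prints for POSIX ACLs:

    // Renders a permission string, appending '+' when the permission carries an ACL.
    static String permissionString(FileStatus status) {
      FsPermission perm = status.getPermission();
      return (status.isDirectory() ? "d" : "-")
          + perm                               // e.g. "rwxr-x---"
          + (perm.getAclBit() ? "+" : "");
    }
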

+ 5 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ScopedAclEntries.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java

@@ -15,12 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.fs.permission;
 
 import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 
@@ -28,8 +29,9 @@ import org.apache.hadoop.fs.permission.AclEntryScope;
  * Groups a list of ACL entries into separate lists for access entries vs.
  * default entries.
  */
-@InterfaceAudience.Private
-final class ScopedAclEntries {
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public final class ScopedAclEntries {
   private static final int PIVOT_NOT_FOUND = -1;
 
   private final List<AclEntry> accessEntries;

+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -226,7 +227,7 @@ public class S3FileSystem extends FileSystem {
       if (overwrite) {
         delete(file, true);
       } else {
-        throw new IOException("File already exists: " + file);
+        throw new FileAlreadyExistsException("File already exists: " + file);
       }
     } else {
       Path parent = file.getParent();

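Since FileAlreadyExistsException extends IOException, existing callers keep working; new callers can distinguish this failure mode. A hedged sketch of that calling pattern (the path is a placeholder, the default file system is assumed to be configured, and some file systems may still report the condition as a plain IOException):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateNoOverwriteSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/create-no-overwrite-sketch");
    fs.create(p, false).close();
    try {
      fs.create(p, false).close();   // second create with overwrite == false
    } catch (FileAlreadyExistsException e) {
      System.out.println("file already present: " + e.getMessage());
    } catch (IOException e) {
      // fallback for file systems that still throw the generic type
      throw e;
    }
  }
}
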
+ 146 - 82
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java

@@ -22,6 +22,7 @@ import static org.apache.hadoop.fs.s3native.NativeS3FileSystem.PATH_DELIMITER;
 
 import java.io.BufferedInputStream;
 import java.io.ByteArrayInputStream;
+import java.io.EOFException;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -32,17 +33,19 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.s3.S3Credentials;
 import org.apache.hadoop.fs.s3.S3Exception;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.jets3t.service.S3Service;
 import org.jets3t.service.S3ServiceException;
 import org.jets3t.service.ServiceException;
 import org.jets3t.service.StorageObjectsChunk;
+import org.jets3t.service.impl.rest.HttpException;
 import org.jets3t.service.impl.rest.httpclient.RestS3Service;
 import org.jets3t.service.model.MultipartPart;
 import org.jets3t.service.model.MultipartUpload;
@@ -51,6 +54,8 @@ import org.jets3t.service.model.S3Object;
 import org.jets3t.service.model.StorageObject;
 import org.jets3t.service.security.AWSCredentials;
 import org.jets3t.service.utils.MultipartUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -63,9 +68,11 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
   private boolean multipartEnabled;
   private long multipartCopyBlockSize;
   static final long MAX_PART_SIZE = (long)5 * 1024 * 1024 * 1024;
+
+  private String serverSideEncryptionAlgorithm;
   
-  public static final Log LOG =
-      LogFactory.getLog(Jets3tNativeFileSystemStore.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(Jets3tNativeFileSystemStore.class);
 
   @Override
   public void initialize(URI uri, Configuration conf) throws IOException {
@@ -77,7 +84,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
             s3Credentials.getSecretAccessKey());
       this.s3Service = new RestS3Service(awsCredentials);
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleException(e);
     }
     multipartEnabled =
         conf.getBoolean("fs.s3n.multipart.uploads.enabled", false);
@@ -87,6 +94,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
     multipartCopyBlockSize = Math.min(
         conf.getLong("fs.s3n.multipart.copy.block.size", MAX_PART_SIZE),
         MAX_PART_SIZE);
+    serverSideEncryptionAlgorithm = conf.get("fs.s3n.server-side-encryption-algorithm");
 
     bucket = new S3Bucket(uri.getHost());
   }
@@ -107,20 +115,15 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
       object.setDataInputStream(in);
       object.setContentType("binary/octet-stream");
       object.setContentLength(file.length());
+      object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
       if (md5Hash != null) {
         object.setMd5Hash(md5Hash);
       }
       s3Service.putObject(bucket, object);
-    } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+    } catch (ServiceException e) {
+      handleException(e, key);
     } finally {
-      if (in != null) {
-        try {
-          in.close();
-        } catch (IOException e) {
-          // ignore
-        }
-      }
+      IOUtils.closeStream(in);
     }
   }
 
@@ -130,6 +133,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
     object.setDataInputFile(file);
     object.setContentType("binary/octet-stream");
     object.setContentLength(file.length());
+    object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
     if (md5Hash != null) {
       object.setMd5Hash(md5Hash);
     }
@@ -142,10 +146,8 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
     try {
       mpUtils.uploadObjects(bucket.getName(), s3Service,
                             objectsToUploadAsMultipart, null);
-    } catch (ServiceException e) {
-      handleServiceException(e);
     } catch (Exception e) {
-      throw new S3Exception(e);
+      handleException(e, key);
     }
   }
   
@@ -156,9 +158,10 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
       object.setDataInputStream(new ByteArrayInputStream(new byte[0]));
       object.setContentType("binary/octet-stream");
       object.setContentLength(0);
+      object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
       s3Service.putObject(bucket, object);
-    } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+    } catch (ServiceException e) {
+      handleException(e, key);
     }
   }
 
@@ -166,20 +169,21 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
   public FileMetadata retrieveMetadata(String key) throws IOException {
     StorageObject object = null;
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting metadata for key: " + key + " from bucket:" + bucket.getName());
-      }
+      LOG.debug("Getting metadata for key: {} from bucket: {}",
+          key, bucket.getName());
       object = s3Service.getObjectDetails(bucket.getName(), key);
       return new FileMetadata(key, object.getContentLength(),
           object.getLastModifiedDate().getTime());
 
     } catch (ServiceException e) {
-      // Following is brittle. Is there a better way?
-      if ("NoSuchKey".equals(e.getErrorCode())) {
-        return null; //return null if key not found
+      try {
+        // run the failure through the shared translation path

+        handleException(e, key);
+        return null;
+      } catch (FileNotFoundException fnfe) {
+        // and downgrade missing files
+        return null;
       }
-      handleServiceException(e);
-      return null; //never returned - keep compiler happy
     } finally {
       if (object != null) {
         object.closeDataInputStream();
@@ -198,13 +202,12 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
   @Override
   public InputStream retrieve(String key) throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting key: " + key + " from bucket:" + bucket.getName());
-      }
+      LOG.debug("Getting key: {} from bucket: {}",
+          key, bucket.getName());
       S3Object object = s3Service.getObject(bucket.getName(), key);
       return object.getDataInputStream();
     } catch (ServiceException e) {
-      handleServiceException(key, e);
+      handleException(e, key);
       return null; //return null if key not found
     }
   }
@@ -222,15 +225,14 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
   public InputStream retrieve(String key, long byteRangeStart)
           throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting key: " + key + " from bucket:" + bucket.getName() + " with byteRangeStart: " + byteRangeStart);
-      }
+      LOG.debug("Getting key: {} from bucket: {} with byteRangeStart: {}",
+          key, bucket.getName(), byteRangeStart);
       S3Object object = s3Service.getObject(bucket, key, null, null, null,
                                             null, byteRangeStart, null);
       return object.getDataInputStream();
     } catch (ServiceException e) {
-      handleServiceException(key, e);
-      return null; //return null if key not found
+      handleException(e, key);
+      return null;
     }
   }
 
@@ -248,17 +250,19 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
   }
 
   /**
-   *
-   * @return
-   * This method returns null if the list could not be populated
-   * due to S3 giving ServiceException
-   * @throws IOException
+   * List the objects whose keys begin with the given prefix.
+   * @param prefix key prefix to list under
+   * @param delimiter delimiter used to roll up keys into common prefixes
+   * @param maxListingLength maximum number of entries to return
+   * @param priorLastKey last key returned by any previous (paged) listing
+   * @return a list of matches
+   * @throws IOException on any reported failure
    */
 
   private PartialListing list(String prefix, String delimiter,
       int maxListingLength, String priorLastKey) throws IOException {
     try {
-      if (prefix.length() > 0 && !prefix.endsWith(PATH_DELIMITER)) {
+      if (!prefix.isEmpty() && !prefix.endsWith(PATH_DELIMITER)) {
         prefix += PATH_DELIMITER;
       }
       StorageObjectsChunk chunk = s3Service.listObjectsChunked(bucket.getName(),
@@ -273,24 +277,20 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
       }
       return new PartialListing(chunk.getPriorLastKey(), fileMetadata,
           chunk.getCommonPrefixes());
-    } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
-      return null; //never returned - keep compiler happy
     } catch (ServiceException e) {
-      handleServiceException(e);
-      return null; //return null if list could not be populated
+      handleException(e, prefix);
+      return null; // never returned - keep compiler happy
     }
   }
 
   @Override
   public void delete(String key) throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Deleting key:" + key + "from bucket" + bucket.getName());
-      }
+      LOG.debug("Deleting key: {} from bucket: {}",
+          key, bucket.getName());
       s3Service.deleteObject(bucket, key);
     } catch (ServiceException e) {
-      handleServiceException(key, e);
+      handleException(e, key);
     }
   }
 
@@ -298,7 +298,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
     try {
       s3Service.renameObject(bucket.getName(), srcKey, new S3Object(dstKey));
     } catch (ServiceException e) {
-      handleServiceException(e);
+      handleException(e, srcKey);
     }
   }
   
@@ -317,10 +317,13 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
           return;
         }
       }
+
+      S3Object dstObject = new S3Object(dstKey);
+      dstObject.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
       s3Service.copyObject(bucket.getName(), srcKey, bucket.getName(),
-          new S3Object(dstKey), false);
+          dstObject, false);
     } catch (ServiceException e) {
-      handleServiceException(srcKey, e);
+      handleException(e, srcKey);
     }
   }
 
@@ -355,19 +358,22 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
       Collections.reverse(listedParts);
       s3Service.multipartCompleteUpload(multipartUpload, listedParts);
     } catch (ServiceException e) {
-      handleServiceException(e);
+      handleException(e, srcObject.getKey());
     }
   }
 
   @Override
   public void purge(String prefix) throws IOException {
+    String key = "";
     try {
-      S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
+      S3Object[] objects =
+          s3Service.listObjects(bucket.getName(), prefix, null);
       for (S3Object object : objects) {
-        s3Service.deleteObject(bucket, object.getKey());
+        key = object.getKey();
+        s3Service.deleteObject(bucket, key);
       }
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleException(e, key);
     }
   }
 
@@ -381,39 +387,97 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
         sb.append(object.getKey()).append("\n");
       }
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleException(e);
     }
     System.out.println(sb);
   }
 
-  private void handleServiceException(String key, ServiceException e) throws IOException {
-    if ("NoSuchKey".equals(e.getErrorCode())) {
-      throw new FileNotFoundException("Key '" + key + "' does not exist in S3");
-    } else {
-      handleServiceException(e);
-    }
+  /**
+   * Handle any service exception by translating it into an IOException
+   * @param e exception
+   * @throws IOException the translated exception; always thrown
+   */
+  private void handleException(Exception e) throws IOException {
+    throw processException(e, e, "");
   }
+  /**
+   * Handle any service exception by translating it into an IOException
+   * @param e exception
+   * @param key key sought from object store
 
-  private void handleS3ServiceException(S3ServiceException e) throws IOException {
-    if (e.getCause() instanceof IOException) {
-      throw (IOException) e.getCause();
-    }
-    else {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("S3 Error code: " + e.getS3ErrorCode() + "; S3 Error message: " + e.getS3ErrorMessage());
-      }
-      throw new S3Exception(e);
-    }
+   * @throws IOException the translated exception; always thrown
+   */
+  private void handleException(Exception e, String key) throws IOException {
+    throw processException(e, e, key);
   }
 
-  private void handleServiceException(ServiceException e) throws IOException {
-    if (e.getCause() instanceof IOException) {
-      throw (IOException) e.getCause();
-    }
-    else {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Got ServiceException with Error code: " + e.getErrorCode() + ";and Error message: " + e.getErrorMessage());
+  /**
+   * Handle any service exception by translating it into an IOException
+   * @param thrown exception
+   * @param original the original exception, thrown (wrapped) if no more
+   * specific translation can be made
+   * @param key key sought from object store or "" for undefined
+   * @return an IOException to throw; never null
+   */
+  private IOException processException(Throwable thrown, Throwable original,
+      String key) {
+    IOException result;
+    if (thrown.getCause() != null) {
+      // recurse down
+      result = processException(thrown.getCause(), original, key);
+    } else if (thrown instanceof HttpException) {
+      // nested HttpException - examine error code and react
+      HttpException httpException = (HttpException) thrown;
+      String responseMessage = httpException.getResponseMessage();
+      int responseCode = httpException.getResponseCode();
+      String bucketName = "s3n://" + bucket.getName();
+      String text = String.format("%s : %03d : %s",
+          bucketName,
+          responseCode,
+          responseMessage);
+      String filename = !key.isEmpty() ? (bucketName + "/" + key) : text;
+      IOException ioe;
+      switch (responseCode) {
+        case 404:
+          result = new FileNotFoundException(filename);
+          break;
+        case 416: // invalid range
+          result = new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF
+                                    +": " + filename);
+          break;
+        case 403: //forbidden
+          result = new AccessControlException("Permission denied"
+                                    +": " + filename);
+          break;
+        default:
+          result = new IOException(text);
+      }
+      result.initCause(thrown);
+    } else if (thrown instanceof S3ServiceException) {
+      S3ServiceException se = (S3ServiceException) thrown;
+      LOG.debug(
+          "S3ServiceException: {}: {} : {}",
+          se.getS3ErrorCode(), se.getS3ErrorMessage(), se, se);
+      if ("InvalidRange".equals(se.getS3ErrorCode())) {
+        result = new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
+      } else {
+        result = new S3Exception(se);
       }
+    } else if (thrown instanceof ServiceException) {
+      ServiceException se = (ServiceException) thrown;
+      LOG.debug("S3ServiceException: {}: {} : {}",
+          se.getErrorCode(), se.toString(), se, se);
+      result = new S3Exception(se);
+    } else if (thrown instanceof IOException) {
+      result = (IOException) thrown;
+    } else {
+      // here there is no exception derived yet.
+      // this means no inner cause, and no translation made yet.
+      // convert the original to an IOException -rather than just the
+      // exception at the base of the tree
+      result = new S3Exception(original);
     }
+
+    return result;
   }
 }

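A condensed, standalone sketch of the translation policy that processException() applies to nested HttpExceptions: 404 becomes FileNotFoundException, 416 becomes EOFException, 403 becomes AccessControlException, and anything else a plain IOException. The method name and messages below are illustrative, not part of the store's API:

import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.security.AccessControlException;

public class S3nErrorTranslationSketch {
  // Map an HTTP status code from the object store onto the IOException
  // subclass that file system callers expect.
  static IOException translate(int httpStatus, String path) {
    switch (httpStatus) {
      case 404: return new FileNotFoundException(path);
      case 416: return new EOFException("Attempted to read past the end of " + path);
      case 403: return new AccessControlException("Permission denied: " + path);
      default:  return new IOException(path + " : HTTP " + httpStatus);
    }
  }

  public static void main(String[] args) {
    System.out.println(translate(404, "s3n://bucket/missing").getClass().getSimpleName());
    System.out.println(translate(416, "s3n://bucket/short").getClass().getSimpleName());
  }
}
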
+ 81 - 27
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.s3native;
 
 import java.io.BufferedOutputStream;
+import java.io.EOFException;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
@@ -37,17 +38,19 @@ import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.s3.S3Exception;
@@ -55,6 +58,8 @@ import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p>
@@ -81,8 +86,8 @@ import org.apache.hadoop.util.Progressable;
 @InterfaceStability.Stable
 public class NativeS3FileSystem extends FileSystem {
   
-  public static final Log LOG = 
-    LogFactory.getLog(NativeS3FileSystem.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(NativeS3FileSystem.class);
   
   private static final String FOLDER_SUFFIX = "_$folder$";
   static final String PATH_DELIMITER = Path.SEPARATOR;
@@ -97,6 +102,7 @@ public class NativeS3FileSystem extends FileSystem {
     private long pos = 0;
     
     public NativeS3FsInputStream(NativeFileSystemStore store, Statistics statistics, InputStream in, String key) {
+      Preconditions.checkNotNull(in, "Null input stream");
       this.store = store;
       this.statistics = statistics;
       this.in = in;
@@ -105,13 +111,20 @@ public class NativeS3FileSystem extends FileSystem {
     
     @Override
     public synchronized int read() throws IOException {
-      int result = -1;
+      int result;
       try {
         result = in.read();
       } catch (IOException e) {
-        LOG.info("Received IOException while reading '" + key + "', attempting to reopen.");
-        seek(pos);
-        result = in.read();
+        LOG.info("Received IOException while reading '{}', attempting to reopen",
+            key);
+        LOG.debug("{}", e, e);
+        try {
+          seek(pos);
+          result = in.read();
+        } catch (EOFException eof) {
+          LOG.debug("EOF on input stream read: {}", eof, eof);
+          result = -1;
+        }
       } 
       if (result != -1) {
         pos++;
@@ -124,12 +137,17 @@ public class NativeS3FileSystem extends FileSystem {
     @Override
     public synchronized int read(byte[] b, int off, int len)
       throws IOException {
-      
+      if (in == null) {
+        throw new EOFException("Cannot read closed stream");
+      }
       int result = -1;
       try {
         result = in.read(b, off, len);
+      } catch (EOFException eof) {
+        throw eof;
       } catch (IOException e) {
-        LOG.info("Received IOException while reading '" + key + "', attempting to reopen.");
+        LOG.info( "Received IOException while reading '{}'," +
+                  " attempting to reopen.", key);
         seek(pos);
         result = in.read(b, off, len);
       }
@@ -143,17 +161,53 @@ public class NativeS3FileSystem extends FileSystem {
     }
 
     @Override
-    public void close() throws IOException {
-      in.close();
+    public synchronized void close() throws IOException {
+      closeInnerStream();
+    }
+
+    /**
+     * Close the inner stream if not null. Even if an exception
+     * is raised during the close, the field is set to null
+     * @throws IOException if raised by the close() operation.
+     */
+    private void closeInnerStream() throws IOException {
+      if (in != null) {
+        try {
+          in.close();
+        } finally {
+          in = null;
+        }
+      }
+    }
+
+    /**
+     * Update inner stream with a new stream and position
+     * @param newStream new stream -must not be null
+     * @param newpos new position
+     * @throws IOException IO exception on a failure to close the existing
+     * stream.
+     */
+    private synchronized void updateInnerStream(InputStream newStream, long newpos) throws IOException {
+      Preconditions.checkNotNull(newStream, "Null newstream argument");
+      closeInnerStream();
+      in = newStream;
+      this.pos = newpos;
     }
 
     @Override
-    public synchronized void seek(long pos) throws IOException {
-      in.close();
-      LOG.info("Opening key '" + key + "' for reading at position '" + pos + "'");
-      in = store.retrieve(key, pos);
-      this.pos = pos;
+    public synchronized void seek(long newpos) throws IOException {
+      if (newpos < 0) {
+        throw new EOFException(
+            FSExceptionMessages.NEGATIVE_SEEK);
+      }
+      if (pos != newpos) {
+        // the seek is attempting to move the current position
+        LOG.debug("Opening key '{}' for reading at position '{}", key, newpos);
+        InputStream newStream = store.retrieve(key, newpos);
+        updateInnerStream(newStream, newpos);
+      }
     }
+
     @Override
     public synchronized long getPos() throws IOException {
       return pos;
@@ -172,6 +226,7 @@ public class NativeS3FileSystem extends FileSystem {
     private OutputStream backupStream;
     private MessageDigest digest;
     private boolean closed;
+    private LocalDirAllocator lDirAlloc;
     
     public NativeS3FsOutputStream(Configuration conf,
         NativeFileSystemStore store, String key, Progressable progress,
@@ -193,11 +248,10 @@ public class NativeS3FileSystem extends FileSystem {
     }
 
     private File newBackupFile() throws IOException {
-      File dir = new File(conf.get("fs.s3.buffer.dir"));
-      if (!dir.mkdirs() && !dir.exists()) {
-        throw new IOException("Cannot create S3 buffer directory: " + dir);
+      if (lDirAlloc == null) {
+        lDirAlloc = new LocalDirAllocator("fs.s3.buffer.dir");
       }
-      File result = File.createTempFile("output-", ".tmp", dir);
+      File result = lDirAlloc.createTmpFileForWrite("output-", LocalDirAllocator.SIZE_UNKNOWN, conf);
       result.deleteOnExit();
       return result;
     }
@@ -214,7 +268,7 @@ public class NativeS3FileSystem extends FileSystem {
       }
 
       backupStream.close();
-      LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload");
+      LOG.info("OutputStream for key '{}' closed. Now beginning upload", key);
       
       try {
         byte[] md5Hash = digest == null ? null : digest.digest();
@@ -226,7 +280,7 @@ public class NativeS3FileSystem extends FileSystem {
         super.close();
         closed = true;
       } 
-      LOG.info("OutputStream for key '" + key + "' upload complete");
+      LOG.info("OutputStream for key '{}' upload complete", key);
     }
 
     @Override
@@ -339,7 +393,7 @@ public class NativeS3FileSystem extends FileSystem {
       Progressable progress) throws IOException {
 
     if (exists(f) && !overwrite) {
-      throw new IOException("File already exists:"+f);
+      throw new FileAlreadyExistsException("File already exists: " + f);
     }
     
     if(LOG.isDebugEnabled()) {
@@ -367,7 +421,7 @@ public class NativeS3FileSystem extends FileSystem {
     String key = pathToKey(absolutePath);
     if (status.isDirectory()) {
       if (!recurse && listStatus(f).length > 0) {
-        throw new IOException("Can not delete " + f + " at is a not empty directory and recurse option is false");
+        throw new IOException("Can not delete " + f + " as is a not empty directory and recurse option is false");
       }
 
       createParent(f);
@@ -538,7 +592,7 @@ public class NativeS3FileSystem extends FileSystem {
     try {
       FileStatus fileStatus = getFileStatus(f);
       if (fileStatus.isFile()) {
-        throw new IOException(String.format(
+        throw new FileAlreadyExistsException(String.format(
             "Can't make directory for path '%s' since it is a file.", f));
 
       }
@@ -556,7 +610,7 @@ public class NativeS3FileSystem extends FileSystem {
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     FileStatus fs = getFileStatus(f); // will throw if the file doesn't exist
     if (fs.isDirectory()) {
-      throw new IOException("'" + f + "' is a directory");
+      throw new FileNotFoundException("'" + f + "' is a directory");
     }
     LOG.info("Opening '" + f + "' for reading");
     Path absolutePath = makeAbsolute(f);

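The reworked seek() above fails fast on a negative offset, treats a seek to the current position as a no-op, and only then re-opens the object. A standalone sketch of that pattern over a generic stream opener (Opener stands in for store.retrieve(key, pos) and is not a Hadoop API):

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

class LazySeekSketch {
  interface Opener {
    InputStream open(long pos) throws IOException;
  }

  private final Opener opener;
  private InputStream in;
  private long pos;

  LazySeekSketch(Opener opener, InputStream initial) {
    this.opener = opener;
    this.in = initial;
  }

  synchronized void seek(long newpos) throws IOException {
    if (newpos < 0) {
      throw new EOFException("Cannot seek to a negative offset");
    }
    if (newpos == pos) {
      return;                       // nothing to do; keep the current stream
    }
    InputStream newStream = opener.open(newpos);
    try {
      in.close();                   // drop the old stream even if close fails
    } finally {
      in = newStream;
      pos = newpos;
    }
  }
}
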
+ 37 - 100
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java

@@ -18,7 +18,7 @@
 package org.apache.hadoop.fs.shell;
 
 import java.io.IOException;
-import java.util.Iterator;
+import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
 
@@ -31,8 +31,10 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.ScopedAclEntries;
 
 /**
  * Acl related operations
@@ -57,8 +59,8 @@ class AclCommands extends FsCommand {
     public static String DESCRIPTION = "Displays the Access Control Lists"
         + " (ACLs) of files and directories. If a directory has a default ACL,"
         + " then getfacl also displays the default ACL.\n"
-        + "-R: List the ACLs of all files and directories recursively.\n"
-        + "<path>: File or directory to list.\n";
+        + "  -R: List the ACLs of all files and directories recursively.\n"
+        + "  <path>: File or directory to list.\n";
 
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
@@ -75,84 +77,43 @@ class AclCommands extends FsCommand {
 
     @Override
     protected void processPath(PathData item) throws IOException {
-      AclStatus aclStatus = item.fs.getAclStatus(item.path);
       out.println("# file: " + item);
-      out.println("# owner: " + aclStatus.getOwner());
-      out.println("# group: " + aclStatus.getGroup());
-      List<AclEntry> entries = aclStatus.getEntries();
-      if (aclStatus.isStickyBit()) {
-        String stickyFlag = "T";
-        for (AclEntry aclEntry : entries) {
-          if (aclEntry.getType() == AclEntryType.OTHER
-              && aclEntry.getScope() == AclEntryScope.ACCESS
-              && aclEntry.getPermission().implies(FsAction.EXECUTE)) {
-            stickyFlag = "t";
-            break;
-          }
-        }
-        out.println("# flags: --" + stickyFlag);
-      }
-
+      out.println("# owner: " + item.stat.getOwner());
+      out.println("# group: " + item.stat.getGroup());
       FsPermission perm = item.stat.getPermission();
-      if (entries.isEmpty()) {
-        printMinimalAcl(perm);
-      } else {
-        printExtendedAcl(perm, entries);
+      if (perm.getStickyBit()) {
+        out.println("# flags: --" +
+          (perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T"));
       }
 
+      List<AclEntry> entries = perm.getAclBit() ?
+        item.fs.getAclStatus(item.path).getEntries() :
+        Collections.<AclEntry>emptyList();
+      ScopedAclEntries scopedEntries = new ScopedAclEntries(
+        AclUtil.getAclFromPermAndEntries(perm, entries));
+      printAclEntriesForSingleScope(scopedEntries.getAccessEntries());
+      printAclEntriesForSingleScope(scopedEntries.getDefaultEntries());
       out.println();
     }
 
     /**
-     * Prints an extended ACL, including all extended ACL entries and also the
-     * base entries implied by the permission bits.
+     * Prints all the ACL entries in a single scope.
      *
-     * @param perm FsPermission of file
      * @param entries List<AclEntry> containing ACL entries of file
      */
-    private void printExtendedAcl(FsPermission perm, List<AclEntry> entries) {
-      // Print owner entry implied by owner permission bits.
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.USER)
-        .setPermission(perm.getUserAction())
-        .build());
-
-      // Print all extended access ACL entries.
-      boolean hasAccessAcl = false;
-      Iterator<AclEntry> entryIter = entries.iterator();
-      AclEntry curEntry = null;
-      while (entryIter.hasNext()) {
-        curEntry = entryIter.next();
-        if (curEntry.getScope() == AclEntryScope.DEFAULT) {
-          break;
-        }
-        hasAccessAcl = true;
-        printExtendedAclEntry(curEntry, perm.getGroupAction());
+    private void printAclEntriesForSingleScope(List<AclEntry> entries) {
+      if (entries.isEmpty()) {
+        return;
       }
-
-      // Print mask entry implied by group permission bits, or print group entry
-      // if there is no access ACL (only default ACL).
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP)
-        .setPermission(perm.getGroupAction())
-        .build());
-
-      // Print other entry implied by other bits.
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.OTHER)
-        .setPermission(perm.getOtherAction())
-        .build());
-
-      // Print default ACL entries.
-      if (curEntry != null && curEntry.getScope() == AclEntryScope.DEFAULT) {
-        out.println(curEntry);
-        // ACL sort order guarantees default mask is the second-to-last entry.
+      if (AclUtil.isMinimalAcl(entries)) {
+        for (AclEntry entry: entries) {
+          out.println(entry);
+        }
+      } else {
+        // ACL sort order guarantees mask is the second-to-last entry.
         FsAction maskPerm = entries.get(entries.size() - 2).getPermission();
-        while (entryIter.hasNext()) {
-          printExtendedAclEntry(entryIter.next(), maskPerm);
+        for (AclEntry entry: entries) {
+          printExtendedAclEntry(entry, maskPerm);
         }
       }
     }
@@ -180,30 +141,6 @@ class AclCommands extends FsCommand {
         out.println(entry);
       }
     }
-
-    /**
-     * Prints a minimal ACL, consisting of exactly 3 ACL entries implied by the
-     * permission bits.
-     *
-     * @param perm FsPermission of file
-     */
-    private void printMinimalAcl(FsPermission perm) {
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.USER)
-        .setPermission(perm.getUserAction())
-        .build());
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.GROUP)
-        .setPermission(perm.getGroupAction())
-        .build());
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.OTHER)
-        .setPermission(perm.getOtherAction())
-        .build());
-    }
   }
 
   /**
@@ -216,19 +153,19 @@ class AclCommands extends FsCommand {
     public static String DESCRIPTION = "Sets Access Control Lists (ACLs)"
         + " of files and directories.\n" 
         + "Options:\n"
-        + "-b :Remove all but the base ACL entries. The entries for user,"
+        + "  -b :Remove all but the base ACL entries. The entries for user,"
         + " group and others are retained for compatibility with permission "
         + "bits.\n" 
-        + "-k :Remove the default ACL.\n"
-        + "-R :Apply operations to all files and directories recursively.\n"
-        + "-m :Modify ACL. New entries are added to the ACL, and existing"
+        + "  -k :Remove the default ACL.\n"
+        + "  -R :Apply operations to all files and directories recursively.\n"
+        + "  -m :Modify ACL. New entries are added to the ACL, and existing"
         + " entries are retained.\n"
-        + "-x :Remove specified ACL entries. Other ACL entries are retained.\n"
-        + "--set :Fully replace the ACL, discarding all existing entries."
+        + "  -x :Remove specified ACL entries. Other ACL entries are retained.\n"
+        + "  --set :Fully replace the ACL, discarding all existing entries."
         + " The <acl_spec> must include entries for user, group, and others"
         + " for compatibility with permission bits.\n"
-        + "<acl_spec>: Comma separated list of ACL entries.\n"
-        + "<path>: File or directory to modify.\n";
+        + "  <acl_spec>: Comma separated list of ACL entries.\n"
+        + "  <path>: File or directory to modify.\n";
 
     CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "b", "k", "R",
         "m", "x", "-set");

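getfacl now derives the "# flags:" line from the permission bits instead of scanning the ACL entry list. A small sketch of just that computation, using only FsPermission calls that appear in this diff:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class StickyFlagSketch {
  static String flagsLine(FsPermission perm) {
    if (!perm.getStickyBit()) {
      return null;                  // getfacl prints no "# flags:" line at all
    }
    // 't' when "other" has execute, 'T' otherwise, matching getfacl output.
    return "# flags: --"
        + (perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T");
  }

  public static void main(String[] args) {
    System.out.println(flagsLine(new FsPermission((short) 01777))); // --t
    System.out.println(flagsLine(new FsPermission((short) 01776))); // --T
  }
}
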
+ 111 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java

@@ -22,7 +22,13 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.EnumSet;
+import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NoSuchElementException;
 
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -34,6 +40,9 @@ import org.apache.hadoop.fs.PathIsDirectoryException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.fs.PathOperationException;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 
 /**
@@ -45,7 +54,6 @@ import org.apache.hadoop.io.IOUtils;
 abstract class CommandWithDestination extends FsCommand {  
   protected PathData dst;
   private boolean overwrite = false;
-  private boolean preserve = false;
   private boolean verifyChecksum = true;
   private boolean writeChecksum = true;
   
@@ -74,7 +82,54 @@ abstract class CommandWithDestination extends FsCommand {
    * implementation allows.
    */
   protected void setPreserve(boolean preserve) {
-    this.preserve = preserve;
+    if (preserve) {
+      preserve(FileAttribute.TIMESTAMPS);
+      preserve(FileAttribute.OWNERSHIP);
+      preserve(FileAttribute.PERMISSION);
+    } else {
+      preserveStatus.clear();
+    }
+  }
+  
+  protected static enum FileAttribute {
+    TIMESTAMPS, OWNERSHIP, PERMISSION, ACL, XATTR;
+
+    public static FileAttribute getAttribute(char symbol) {
+      for (FileAttribute attribute : values()) {
+        if (attribute.name().charAt(0) == Character.toUpperCase(symbol)) {
+          return attribute;
+        }
+      }
+      throw new NoSuchElementException("No attribute for " + symbol);
+    }
+  }
+  
+  private EnumSet<FileAttribute> preserveStatus = 
+      EnumSet.noneOf(FileAttribute.class);
+  
+  /**
+   * Checks if the input attribute should be preserved or not
+   *
+   * @param attribute - Attribute to check
+   * @return boolean true if attribute should be preserved, false otherwise
+   */
+  private boolean shouldPreserve(FileAttribute attribute) {
+    return preserveStatus.contains(attribute);
+  }
+  
+  /**
+   * Add file attributes that need to be preserved. This method may be
+   * called multiple times to add attributes.
+   *
+   * @param fileAttribute - Attribute to add, one at a time
+   */
+  protected void preserve(FileAttribute fileAttribute) {
+    for (FileAttribute attribute : preserveStatus) {
+      if (attribute.equals(fileAttribute)) {
+        return;
+      }
+    }
+    preserveStatus.add(fileAttribute);
   }
 
   /**
@@ -212,6 +267,9 @@ abstract class CommandWithDestination extends FsCommand {
         dst.refreshStatus(); // need to update stat to know it exists now
       }      
       super.recursePath(src);
+      if (dst.stat.isDirectory()) {
+        preserveAttributes(src, dst);
+      }
     } finally {
       dst = savedDst;
     }
@@ -243,19 +301,7 @@ abstract class CommandWithDestination extends FsCommand {
     try {
       in = src.fs.open(src.path);
       copyStreamToTarget(in, target);
-      if(preserve) {
-        target.fs.setTimes(
-          target.path,
-          src.stat.getModificationTime(),
-          src.stat.getAccessTime());
-        target.fs.setOwner(
-          target.path,
-          src.stat.getOwner(),
-          src.stat.getGroup());
-        target.fs.setPermission(
-          target.path,
-          src.stat.getPermission());
-      }
+      preserveAttributes(src, target);
     } finally {
       IOUtils.closeStream(in);
     }
@@ -285,6 +331,56 @@ abstract class CommandWithDestination extends FsCommand {
     }
   }
 
+  /**
+   * Preserve the attributes of the source to the target.
+   * The method calls {@link #shouldPreserve(FileAttribute)} to check what
+   * attribute to preserve.
+   * @param src source to preserve
+   * @param target where to preserve attributes
+   * @throws IOException if fails to preserve attributes
+   */
+  protected void preserveAttributes(PathData src, PathData target)
+      throws IOException {
+    if (shouldPreserve(FileAttribute.TIMESTAMPS)) {
+      target.fs.setTimes(
+          target.path,
+          src.stat.getModificationTime(),
+          src.stat.getAccessTime());
+    }
+    if (shouldPreserve(FileAttribute.OWNERSHIP)) {
+      target.fs.setOwner(
+          target.path,
+          src.stat.getOwner(),
+          src.stat.getGroup());
+    }
+    if (shouldPreserve(FileAttribute.PERMISSION) ||
+        shouldPreserve(FileAttribute.ACL)) {
+      target.fs.setPermission(
+          target.path,
+          src.stat.getPermission());
+    }
+    if (shouldPreserve(FileAttribute.ACL)) {
+      FsPermission perm = src.stat.getPermission();
+      if (perm.getAclBit()) {
+        List<AclEntry> srcEntries =
+            src.fs.getAclStatus(src.path).getEntries();
+        List<AclEntry> srcFullEntries =
+            AclUtil.getAclFromPermAndEntries(perm, srcEntries);
+        target.fs.setAcl(target.path, srcFullEntries);
+      }
+    }
+    if (shouldPreserve(FileAttribute.XATTR)) {
+      Map<String, byte[]> srcXAttrs = src.fs.getXAttrs(src.path);
+      if (srcXAttrs != null) {
+        Iterator<Entry<String, byte[]>> iter = srcXAttrs.entrySet().iterator();
+        while (iter.hasNext()) {
+          Entry<String, byte[]> entry = iter.next();
+          target.fs.setXAttr(target.path, entry.getKey(), entry.getValue());
+        }
+      }
+    }
+  }
+
   // Helper filter filesystem that registers created files as temp files to
   // be deleted on exit unless successfully renamed
   private static class TargetFileSystem extends FilterFileSystem {

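The FileAttribute enum above maps the single letters of -p[topax] onto attributes by first character. The real enum is nested in a package-private class, so the copy below is for illustration only; the lookup logic mirrors getAttribute() verbatim:

import java.util.EnumSet;
import java.util.NoSuchElementException;

public class FileAttributeSketch {
  enum FileAttribute {
    TIMESTAMPS, OWNERSHIP, PERMISSION, ACL, XATTR;

    static FileAttribute getAttribute(char symbol) {
      for (FileAttribute attribute : values()) {
        if (attribute.name().charAt(0) == Character.toUpperCase(symbol)) {
          return attribute;
        }
      }
      throw new NoSuchElementException("No attribute for " + symbol);
    }
  }

  public static void main(String[] args) {
    EnumSet<FileAttribute> preserve = EnumSet.noneOf(FileAttribute.class);
    for (char c : "topax".toCharArray()) {
      preserve.add(FileAttribute.getAttribute(c));    // t, o, p, a, x
    }
    System.out.println(preserve);  // [TIMESTAMPS, OWNERSHIP, PERMISSION, ACL, XATTR]
  }
}
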
+ 50 - 24
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java

@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 
@@ -54,10 +55,10 @@ class CopyCommands {
     public static final String NAME = "getmerge";    
     public static final String USAGE = "[-nl] <src> <localdst>";
     public static final String DESCRIPTION =
-      "Get all the files in the directories that\n" +
-      "match the source file pattern and merge and sort them to only\n" +
+      "Get all the files in the directories that " +
+      "match the source file pattern and merge and sort them to only " +
       "one file on local fs. <src> is kept.\n" +
-      "  -nl   Add a newline character at the end of each file.";
+      "-nl: Add a newline character at the end of each file.";
 
     protected PathData dst = null;
     protected String delimiter = null;
@@ -132,24 +133,49 @@ class CopyCommands {
 
   static class Cp extends CommandWithDestination {
     public static final String NAME = "cp";
-    public static final String USAGE = "[-f] [-p] <src> ... <dst>";
+    public static final String USAGE = "[-f] [-p | -p[topax]] <src> ... <dst>";
     public static final String DESCRIPTION =
-      "Copy files that match the file pattern <src> to a\n" +
-      "destination.  When copying multiple files, the destination\n" +
-      "must be a directory. Passing -p preserves access and\n" +
-      "modification times, ownership and the mode. Passing -f\n" +
-      "overwrites the destination if it already exists.\n";
-    
+      "Copy files that match the file pattern <src> to a " +
+      "destination.  When copying multiple files, the destination " +
+      "must be a directory. Passing -p preserves status " +
+      "[topax] (timestamps, ownership, permission, ACLs, XAttr). " +
+      "If -p is specified with no <arg>, then preserves " +
+      "timestamps, ownership, permission. If -pa is specified, " +
+      "then preserves permission also because ACL is a super-set of " +
+      "permission. Passing -f overwrites the destination if it " +
+      "already exists.\n";
+
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
-      CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "f", "p");
+      popPreserveOption(args);
+      CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "f");
       cf.parse(args);
       setOverwrite(cf.getOpt("f"));
-      setPreserve(cf.getOpt("p"));
       // should have a -r option
       setRecursive(true);
       getRemoteDestination(args);
     }
+    
+    private void popPreserveOption(List<String> args) {
+      for (Iterator<String> iter = args.iterator(); iter.hasNext(); ) {
+        String cur = iter.next();
+        if (cur.equals("--")) {
+          // stop parsing arguments when you see --
+          break;
+        } else if (cur.startsWith("-p")) {
+          iter.remove();
+          if (cur.length() == 2) {
+            setPreserve(true);
+          } else {
+            String attributes = cur.substring(2);
+            for (int index = 0; index < attributes.length(); index++) {
+              preserve(FileAttribute.getAttribute(attributes.charAt(index)));
+            }
+          }
+          return;
+        }
+      }
+    }
   }
   
   /** 
@@ -160,10 +186,10 @@ class CopyCommands {
     public static final String USAGE =
       "[-p] [-ignoreCrc] [-crc] <src> ... <localdst>";
     public static final String DESCRIPTION =
-      "Copy files that match the file pattern <src>\n" +
-      "to the local name.  <src> is kept.  When copying multiple,\n" +
-      "files, the destination must be a directory. Passing\n" +
-      "-p preserves access and modification times,\n" +
+      "Copy files that match the file pattern <src> " +
+      "to the local name.  <src> is kept.  When copying multiple " +
+      "files, the destination must be a directory. Passing " +
+      "-p preserves access and modification times, " +
       "ownership and the mode.\n";
 
     @Override
@@ -187,11 +213,11 @@ class CopyCommands {
     public static final String NAME = "put";
     public static final String USAGE = "[-f] [-p] <localsrc> ... <dst>";
     public static final String DESCRIPTION =
-      "Copy files from the local file system\n" +
-      "into fs. Copying fails if the file already\n" +
-      "exists, unless the -f flag is given. Passing\n" +
-      "-p preserves access and modification times,\n" +
-      "ownership and the mode. Passing -f overwrites\n" +
+      "Copy files from the local file system " +
+      "into fs. Copying fails if the file already " +
+      "exists, unless the -f flag is given. Passing " +
+      "-p preserves access and modification times, " +
+      "ownership and the mode. Passing -f overwrites " +
       "the destination if it already exists.\n";
 
     @Override
@@ -254,9 +280,9 @@ class CopyCommands {
     public static final String NAME = "appendToFile";
     public static final String USAGE = "<localsrc> ... <dst>";
     public static final String DESCRIPTION =
-        "Appends the contents of all the given local files to the\n" +
-            "given dst file. The dst file will be created if it does\n" +
-            "not exist. If <localSrc> is -, then the input is read\n" +
+        "Appends the contents of all the given local files to the " +
+            "given dst file. The dst file will be created if it does " +
+            "not exist. If <localSrc> is -, then the input is read " +
             "from stdin.";
 
     private static final int DEFAULT_IO_LENGTH = 1024 * 1024;

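A hedged usage sketch of the new flag from Java, driving FsShell through ToolRunner; the paths are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class CpPreserveSketch {
  public static void main(String[] args) throws Exception {
    // -ptopax asks cp to preserve timestamps, ownership, permission, ACL and XAttrs.
    int rc = ToolRunner.run(new Configuration(), new FsShell(),
        new String[] {"-cp", "-ptopax", "/src/dir", "/dst/dir"});
    System.exit(rc);
  }
}
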
+ 31 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java

@@ -42,16 +42,22 @@ public class Count extends FsCommand {
     factory.addClass(Count.class, "-count");
   }
 
+  private static final String OPTION_QUOTA = "q";
+  private static final String OPTION_HUMAN = "h";
+
   public static final String NAME = "count";
-  public static final String USAGE = "[-q] <path> ...";
+  public static final String USAGE =
+      "[-" + OPTION_QUOTA + "] [-" + OPTION_HUMAN + "] <path> ...";
   public static final String DESCRIPTION = 
       "Count the number of directories, files and bytes under the paths\n" +
       "that match the specified file pattern.  The output columns are:\n" +
       "DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME or\n" +
       "QUOTA REMAINING_QUOTA SPACE_QUOTA REMAINING_SPACE_QUOTA \n" +
-      "      DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME";
+      "      DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME\n" +
+      "The -h option shows file sizes in human readable format.";
   
   private boolean showQuotas;
+  private boolean humanReadable;
 
   /** Constructor */
   public Count() {}
@@ -70,17 +76,37 @@ public class Count extends FsCommand {
 
   @Override
   protected void processOptions(LinkedList<String> args) {
-    CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "q");
+    CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE,
+      OPTION_QUOTA, OPTION_HUMAN);
     cf.parse(args);
     if (args.isEmpty()) { // default path is the current working directory
       args.add(".");
     }
-    showQuotas = cf.getOpt("q");
+    showQuotas = cf.getOpt(OPTION_QUOTA);
+    humanReadable = cf.getOpt(OPTION_HUMAN);
   }
 
   @Override
   protected void processPath(PathData src) throws IOException {
     ContentSummary summary = src.fs.getContentSummary(src.path);
-    out.println(summary.toString(showQuotas) + src);
+    out.println(summary.toString(showQuotas, isHumanReadable()) + src);
+  }
+  
+  /**
+   * Should quotas get shown as part of the report?
+   * @return if quotas should be shown then true otherwise false
+   */
+  @InterfaceAudience.Private
+  boolean isShowQuotas() {
+    return showQuotas;
+  }
+  
+  /**
+   * Should sizes be shown in human readable format rather than bytes?
+   * @return true if human readable format
+   */
+  @InterfaceAudience.Private
+  boolean isHumanReadable() {
+    return humanReadable;
   }
 }

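The same FsShell pattern works for the new -h flag on count; a hedged usage sketch (the path is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class CountHumanReadableSketch {
  public static void main(String[] args) throws Exception {
    // -q adds the quota columns; -h prints sizes in human readable units.
    int rc = ToolRunner.run(new Configuration(), new FsShell(),
        new String[] {"-count", "-q", "-h", "/user/example"});
    System.exit(rc);
  }
}
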
Some files are not shown because too many files changed in this commit.