
Merge branch 'trunk' into HDFS-7240

Arpit Agarwal, 9 years ago
parent
commit
76279f5877
100 changed files with 3811 additions and 1152 deletions
  1. BUILDING.txt (+4, -4)
  2. dev-support/docker/Dockerfile (+14, -12)
  3. hadoop-assemblies/pom.xml (+2, -2)
  4. hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml (+56, -0)
  5. hadoop-build-tools/pom.xml (+1, -1)
  6. hadoop-client/pom.xml (+2, -2)
  7. hadoop-common-project/hadoop-annotations/pom.xml (+2, -2)
  8. hadoop-common-project/hadoop-auth-examples/pom.xml (+2, -2)
  9. hadoop-common-project/hadoop-auth/pom.xml (+2, -2)
  10. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java (+5, -4)
  11. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java (+1, -6)
  12. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java (+4, -13)
  13. hadoop-common-project/hadoop-common/pom.xml (+2, -2)
  14. hadoop-common-project/hadoop-common/src/main/bin/hadoop (+189, -128)
  15. hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh (+23, -0)
  16. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java (+21, -0)
  17. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java (+47, -42)
  18. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java (+32, -0)
  19. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java (+75, -52)
  20. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java (+6, -1)
  21. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java (+43, -0)
  22. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java (+9, -0)
  23. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java (+51, -7)
  24. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java (+136, -0)
  25. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java (+11, -4)
  26. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobalStorageStatistics.java (+130, -0)
  27. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java (+2, -2)
  28. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java (+93, -0)
  29. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java (+0, -1)
  30. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnionStorageStatistics.java (+113, -0)
  31. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputBuffer.java (+74, -7)
  32. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java (+2, -2)
  33. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java (+5, -2)
  34. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java (+30, -11)
  35. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/FailoverProxyProvider.java (+11, -0)
  36. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java (+180, -170)
  37. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java (+8, -7)
  38. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java (+11, -5)
  39. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricStringBuilder.java (+141, -0)
  40. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterLong.java (+1, -1)
  41. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java (+111, -33)
  42. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java (+9, -0)
  43. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java (+45, -1)
  44. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java (+197, -72)
  45. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java (+75, -0)
  46. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java (+11, -10)
  47. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java (+4, -2)
  48. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java (+66, -47)
  49. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProvider.java (+35, -2)
  50. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java (+98, -68)
  51. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java (+3, -3)
  52. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/LocalJavaKeyStoreProvider.java (+2, -2)
  53. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java (+1, -11)
  54. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/XFrameOptionsFilter.java (+167, -0)
  55. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/package-info.java (+22, -0)
  56. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java (+1, -1)
  57. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java (+29, -4)
  58. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java (+5, -2)
  59. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/ServletUtils.java (+5, -2)
  60. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java (+31, -8)
  61. hadoop-common-project/hadoop-common/src/main/resources/core-default.xml (+65, -3)
  62. hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md (+15, -7)
  63. hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md (+5, -0)
  64. hadoop-common-project/hadoop-common/src/site/markdown/GroupsMapping.md (+6, -0)
  65. hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md (+6, -0)
  66. hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md (+83, -4)
  67. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java (+1, -1)
  68. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java (+27, -6)
  69. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java (+1, -0)
  70. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java (+1, -0)
  71. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractBondedFSContract.java (+2, -1)
  72. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFileAppend.java (+67, -0)
  73. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/FakeCompressor.java (+113, -0)
  74. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/FakeDecompressor.java (+109, -0)
  75. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java (+2, -178)
  76. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestDecompressorStream.java (+99, -0)
  77. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java (+20, -47)
  78. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java (+35, -4)
  79. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiag.java (+16, -0)
  80. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java (+18, -15)
  81. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingBase.java (+57, -14)
  82. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithOneQuery.java (+100, -0)
  83. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithPosixGroup.java (+18, -23)
  84. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java (+18, -9)
  85. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java (+52, -4)
  86. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java (+3, -2)
  87. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/http/TestRestCsrfPreventionFilter.java (+22, -18)
  88. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/http/TestXFrameOptionsFilter.java (+151, -0)
  89. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java (+25, -1)
  90. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java (+2, -27)
  91. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java (+88, -4)
  92. hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash (+2, -2)
  93. hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats (+32, -0)
  94. hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommands.bats (+78, -0)
  95. hadoop-common-project/hadoop-kms/pom.xml (+2, -2)
  96. hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh (+4, -10)
  97. hadoop-common-project/hadoop-minikdc/pom.xml (+2, -2)
  98. hadoop-common-project/hadoop-nfs/pom.xml (+2, -2)
  99. hadoop-common-project/pom.xml (+2, -2)
  100. hadoop-dist/pom.xml (+2, -2)

+ 4 - 4
BUILDING.txt

@@ -4,7 +4,7 @@ Build instructions for Hadoop
 Requirements:
 
 * Unix System
-* JDK 1.7+
+* JDK 1.8+
 * Maven 3.0 or later
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
@@ -56,12 +56,12 @@ Known issues:
 ----------------------------------------------------------------------------------
 Installing required packages for clean install of Ubuntu 14.04 LTS Desktop:
 
-* Oracle JDK 1.7 (preferred)
+* Oracle JDK 1.8 (preferred)
   $ sudo apt-get purge openjdk*
   $ sudo apt-get install software-properties-common
   $ sudo add-apt-repository ppa:webupd8team/java
   $ sudo apt-get update
-  $ sudo apt-get install oracle-java7-installer
+  $ sudo apt-get install oracle-java8-installer
 * Maven
   $ sudo apt-get -y install maven
 * Native libraries
@@ -306,7 +306,7 @@ Building on Windows
 Requirements:
 
 * Windows System
-* JDK 1.7+
+* JDK 1.8+
 * Maven 3.0 or later
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0

+ 14 - 12
dev-support/docker/Dockerfile

@@ -28,9 +28,11 @@ ENV DEBCONF_TERSE true
 
 ######
 # Install common dependencies from packages
+#
+# WARNING: DO NOT PUT JAVA APPS HERE! Otherwise they will install default
+# Ubuntu Java.  See Java section below!
 ######
 RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
-    ant \
     build-essential \
     bzip2 \
     cmake \
@@ -42,17 +44,14 @@ RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
     git \
     gnupg-agent \
     make \
-    maven \
     libbz2-dev \
     libcurl4-openssl-dev \
     libfuse-dev \
-    libjansson-dev \
     libprotobuf-dev \
     libprotoc-dev \
     libsnappy-dev \
     libssl-dev \
     libtool \
-    openjdk-7-jdk \
     pinentry-curses \
     pkg-config \
     protobuf-compiler \
@@ -63,10 +62,6 @@ RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
     snappy \
     zlib1g-dev
 
-# Fixing the Apache commons / Maven dependency problem under Ubuntu:
-# See http://wiki.apache.org/commons/VfsProblems
-RUN cd /usr/share/maven/lib && ln -s ../../java/commons-lang.jar .
-
 ######
 # Install ISA-L library
 ######
@@ -86,14 +81,21 @@ RUN apt-get -q install --no-install-recommends -y software-properties-common
 RUN add-apt-repository -y ppa:webupd8team/java
 RUN apt-get -q update
 
-# Auto-accept the Oracle JDK license
-RUN echo oracle-java7-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
-RUN apt-get -q install --no-install-recommends -y oracle-java7-installer
-
 # Auto-accept the Oracle JDK license
 RUN echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
 RUN apt-get -q install --no-install-recommends -y oracle-java8-installer
 
+####
+# Apps that require Java
+###
+RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
+    ant \
+    maven
+
+# Fixing the Apache commons / Maven dependency problem under Ubuntu:
+# See http://wiki.apache.org/commons/VfsProblems
+RUN cd /usr/share/maven/lib && ln -s ../../java/commons-lang.jar .
+
 ######
 # Install findbugs
 ######

+ 2 - 2
hadoop-assemblies/pom.xml

@@ -23,12 +23,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>3.0.0-alpha1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>3.0.0-SNAPSHOT</version>
+  <version>3.0.0-alpha1-SNAPSHOT</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
 

+ 56 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml

@@ -23,6 +23,38 @@
   </formats>
   <includeBaseDirectory>false</includeBaseDirectory>
   <fileSets>
+    <fileSet>
+      <directory>../hadoop-archive-logs/src/main/shellprofile.d</directory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <outputDirectory>/libexec/shellprofile.d</outputDirectory>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <fileSet>
+      <directory>../hadoop-archives/src/main/shellprofile.d</directory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <outputDirectory>/libexec/shellprofile.d</outputDirectory>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <fileSet>
+      <directory>../hadoop-distcp/src/main/shellprofile.d</directory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <outputDirectory>/libexec/shellprofile.d</outputDirectory>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <fileSet>
+      <directory>../hadoop-extras/src/main/shellprofile.d</directory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <outputDirectory>/libexec/shellprofile.d</outputDirectory>
+      <fileMode>0755</fileMode>
+    </fileSet>
     <fileSet>
       <directory>../hadoop-pipes/src/main/native/pipes/api/hadoop</directory>
       <includes>
@@ -93,6 +125,14 @@
         <include>*-sources.jar</include>
       </includes>
     </fileSet>
+    <fileSet>
+      <directory>../hadoop-gridmix/src/main/shellprofile.d</directory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <outputDirectory>/libexec/shellprofile.d</outputDirectory>
+      <fileMode>0755</fileMode>
+    </fileSet>
     <fileSet>
       <directory>../hadoop-rumen/target</directory>
       <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
@@ -100,6 +140,14 @@
         <include>*-sources.jar</include>
       </includes>
     </fileSet>
+    <fileSet>
+      <directory>../hadoop-rumen/src/main/shellprofile.d</directory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <outputDirectory>/libexec/shellprofile.d</outputDirectory>
+      <fileMode>0755</fileMode>
+    </fileSet>
     <fileSet>
       <directory>../hadoop-streaming/target</directory>
       <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
@@ -107,6 +155,14 @@
         <include>*-sources.jar</include>
       </includes>
     </fileSet>
+    <fileSet>
+      <directory>../hadoop-streaming/src/main/shellprofile.d</directory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <outputDirectory>/libexec/shellprofile.d</outputDirectory>
+      <fileMode>0755</fileMode>
+    </fileSet>
     <fileSet>
       <directory>../hadoop-sls/target</directory>
       <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>

+ 1 - 1
hadoop-build-tools/pom.xml

@@ -18,7 +18,7 @@
   <parent>
     <artifactId>hadoop-main</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>3.0.0-alpha1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-build-tools</artifactId>

+ 2 - 2
hadoop-client/pom.xml

@@ -18,12 +18,12 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-   <version>3.0.0-SNAPSHOT</version>
+   <version>3.0.0-alpha1-SNAPSHOT</version>
    <relativePath>../hadoop-project-dist</relativePath>
  </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-client</artifactId>
-  <version>3.0.0-SNAPSHOT</version>
+  <version>3.0.0-alpha1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

+ 2 - 2
hadoop-common-project/hadoop-annotations/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>3.0.0-alpha1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-annotations</artifactId>
-  <version>3.0.0-SNAPSHOT</version>
+  <version>3.0.0-alpha1-SNAPSHOT</version>
   <description>Apache Hadoop Annotations</description>
   <name>Apache Hadoop Annotations</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>3.0.0-alpha1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>3.0.0-SNAPSHOT</version>
+  <version>3.0.0-alpha1-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop Auth Examples</name>

+ 2 - 2
hadoop-common-project/hadoop-auth/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>3.0.0-alpha1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth</artifactId>
-  <version>3.0.0-SNAPSHOT</version>
+  <version>3.0.0-alpha1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop Auth</name>

+ 5 - 4
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java

@@ -323,8 +323,8 @@ public class KerberosName {
         }
       }
       if (result != null && nonSimplePattern.matcher(result).find()) {
-        throw new NoMatchingRule("Non-simple name " + result +
-                                 " after auth_to_local rule " + this);
+        LOG.info("Non-simple name {} after auth_to_local rule {}",
+            result, this);
       }
       if (toLowerCase && result != null) {
         result = result.toLowerCase(Locale.ENGLISH);
@@ -377,7 +377,7 @@ public class KerberosName {
   /**
    * Get the translation of the principal name into an operating system
    * user name.
-   * @return the short name
+   * @return the user name
    * @throws IOException throws if something is wrong with the rules
    */
   public String getShortName() throws IOException {
@@ -397,7 +397,8 @@ public class KerberosName {
         return result;
       }
     }
-    throw new NoMatchingRule("No rules applied to " + toString());
+    LOG.info("No auth_to_local rules applied to {}", this);
+    return toString();
   }
 
   /**

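The change above replaces the hard NoMatchingRule failure in getShortName() with an INFO log and a fall-through to the full principal name. A minimal sketch of the new behaviour, assuming only the DEFAULT rule is configured and the local realm is not FOO:

    import org.apache.hadoop.security.authentication.util.KerberosName;

    public class AuthToLocalFallthrough {
      public static void main(String[] args) throws Exception {
        // Only the DEFAULT rule: it maps principals in the local realm and
        // matches nothing else.
        KerberosName.setRules("DEFAULT");

        KerberosName kn = new KerberosName("bar@FOO");
        // Previously this threw NoMatchingRule("No rules applied to bar@FOO");
        // it now logs "No auth_to_local rules applied to bar@FOO" at INFO and
        // returns the principal unchanged.
        System.out.println(kn.getShortName());   // prints "bar@FOO"
      }
    }

The updated TestKerberosAuthenticationHandler and TestKerberosName cases below assert exactly this pass-through.
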
+ 1 - 6
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java

@@ -109,12 +109,7 @@ public class TestKerberosAuthenticationHandler
     kn = new KerberosName("bar@BAR");
     Assert.assertEquals("bar", kn.getShortName());
     kn = new KerberosName("bar@FOO");
-    try {
-      kn.getShortName();
-      Assert.fail();
-    }
-    catch (Exception ex) {      
-    }
+    Assert.assertEquals("bar@FOO", kn.getShortName());
   }
 
   @Test(timeout=60000)

+ 4 - 13
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java

@@ -72,23 +72,14 @@ public class TestKerberosName {
     }
   }
 
-  private void checkBadTranslation(String from) {
-    System.out.println("Checking bad translation for " + from);
-    KerberosName nm = new KerberosName(from);
-    try {
-      nm.getShortName();
-      Assert.fail("didn't get exception for " + from);
-    } catch (IOException ie) {
-      // PASS
-    }
-  }
-
   @Test
   public void testAntiPatterns() throws Exception {
     checkBadName("owen/owen/owen@FOO.COM");
     checkBadName("owen@foo/bar.com");
-    checkBadTranslation("foo@ACME.COM");
-    checkBadTranslation("root/joe@FOO.COM");
+
+    // no rules applied, these should pass
+    checkTranslation("foo@ACME.COM", "foo@ACME.COM");
+    checkTranslation("root/joe@FOO.COM", "root/joe@FOO.COM");
   }
 
   @Test

+ 2 - 2
hadoop-common-project/hadoop-common/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>3.0.0-alpha1-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-common</artifactId>
-  <version>3.0.0-SNAPSHOT</version>
+  <version>3.0.0-alpha1-SNAPSHOT</version>
   <description>Apache Hadoop Common</description>
   <name>Apache Hadoop Common</name>
   <packaging>jar</packaging>

+ 189 - 128
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -16,7 +16,12 @@
 # limitations under the License.
 
 MYNAME="${BASH_SOURCE-$0}"
+HADOOP_SHELL_EXECNAME="${MYNAME##*/}"
 
+## @description  build up the hadoop command's usage text.
+## @audience     public
+## @stability    stable
+## @replaceable  no
 function hadoop_usage
 {
   hadoop_add_option "buildpaths" "attempt to add class files from build tree"
@@ -25,14 +30,11 @@ function hadoop_usage
   hadoop_add_option "hosts filename" "list of hosts to use in slave mode"
   hadoop_add_option "slaves" "turn on slave mode"
 
-  hadoop_add_subcommand "archive" "create a Hadoop archive"
   hadoop_add_subcommand "checknative" "check native Hadoop and compression libraries availability"
   hadoop_add_subcommand "classpath" "prints the class path needed to get the Hadoop jar and the required libraries"
   hadoop_add_subcommand "conftest" "validate configuration XML files"
   hadoop_add_subcommand "credential" "interact with credential providers"
   hadoop_add_subcommand "daemonlog" "get/set the log level for each daemon"
-  hadoop_add_subcommand "distch" "distributed metadata changer"
-  hadoop_add_subcommand "distcp" "copy file or directories recursively"
   hadoop_add_subcommand "dtutil" "operations related to delegation tokens"
   hadoop_add_subcommand "envvars" "display computed Hadoop environment variables"
   hadoop_add_subcommand "fs" "run a generic filesystem user client"
@@ -42,7 +44,127 @@ function hadoop_usage
   hadoop_add_subcommand "key" "manage keys via the KeyProvider"
   hadoop_add_subcommand "trace" "view and modify Hadoop tracing settings"
   hadoop_add_subcommand "version" "print the version"
-  hadoop_generate_usage "${MYNAME}" true
+  hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
+}
+
+## @description  Default command handler for hadoop command
+## @audience     public
+## @stability    stable
+## @replaceable  no
+## @param        CLI arguments
+function hadoopcmd_case
+{
+  subcmd=$1
+  shift
+
+  case ${subcmd} in
+    balancer|datanode|dfs|dfsadmin|dfsgroups|  \
+    namenode|secondarynamenode|fsck|fetchdt|oiv| \
+    portmap|nfs3)
+      hadoop_error "WARNING: Use of this script to execute ${subcmd} is deprecated."
+      subcmd=${subcmd/dfsgroups/groups}
+      hadoop_error "WARNING: Attempting to execute replacement \"hdfs ${subcmd}\" instead."
+      hadoop_error ""
+      #try to locate hdfs and if present, delegate to it.
+      if [[ -f "${HADOOP_HDFS_HOME}/bin/hdfs" ]]; then
+        # shellcheck disable=SC2086
+        exec "${HADOOP_HDFS_HOME}/bin/hdfs" \
+        --config "${HADOOP_CONF_DIR}" "${subcmd}"  "$@"
+      elif [[ -f "${HADOOP_HOME}/bin/hdfs" ]]; then
+        # shellcheck disable=SC2086
+        exec "${HADOOP_HOME}/bin/hdfs" \
+        --config "${HADOOP_CONF_DIR}" "${subcmd}" "$@"
+      else
+        hadoop_error "HADOOP_HDFS_HOME not found!"
+        exit 1
+      fi
+    ;;
+
+    #mapred commands for backwards compatibility
+    pipes|job|queue|mrgroups|mradmin|jobtracker|tasktracker)
+      hadoop_error "WARNING: Use of this script to execute ${subcmd} is deprecated."
+      subcmd=${subcmd/mrgroups/groups}
+      hadoop_error "WARNING: Attempting to execute replacement \"mapred ${subcmd}\" instead."
+      hadoop_error ""
+      #try to locate mapred and if present, delegate to it.
+      if [[ -f "${HADOOP_MAPRED_HOME}/bin/mapred" ]]; then
+        exec "${HADOOP_MAPRED_HOME}/bin/mapred" \
+        --config "${HADOOP_CONF_DIR}" "${subcmd}" "$@"
+      elif [[ -f "${HADOOP_HOME}/bin/mapred" ]]; then
+        exec "${HADOOP_HOME}/bin/mapred" \
+        --config "${HADOOP_CONF_DIR}" "${subcmd}" "$@"
+      else
+        hadoop_error "HADOOP_MAPRED_HOME not found!"
+        exit 1
+      fi
+    ;;
+    checknative)
+      HADOOP_CLASSNAME=org.apache.hadoop.util.NativeLibraryChecker
+    ;;
+    classpath)
+      hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
+    ;;
+    conftest)
+      HADOOP_CLASSNAME=org.apache.hadoop.util.ConfTest
+    ;;
+    credential)
+      HADOOP_CLASSNAME=org.apache.hadoop.security.alias.CredentialShell
+    ;;
+    daemonlog)
+      HADOOP_CLASSNAME=org.apache.hadoop.log.LogLevel
+    ;;
+    dtutil)
+      HADOOP_CLASSNAME=org.apache.hadoop.security.token.DtUtilShell
+    ;;
+    envvars)
+      echo "JAVA_HOME='${JAVA_HOME}'"
+      echo "HADOOP_COMMON_HOME='${HADOOP_COMMON_HOME}'"
+      echo "HADOOP_COMMON_DIR='${HADOOP_COMMON_DIR}'"
+      echo "HADOOP_COMMON_LIB_JARS_DIR='${HADOOP_COMMON_LIB_JARS_DIR}'"
+      echo "HADOOP_COMMON_LIB_NATIVE_DIR='${HADOOP_COMMON_LIB_NATIVE_DIR}'"
+      echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
+      echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
+      echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
+      echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
+      exit 0
+    ;;
+    fs)
+      HADOOP_CLASSNAME=org.apache.hadoop.fs.FsShell
+    ;;
+    jar)
+      if [[ -n "${YARN_OPTS}" ]] || [[ -n "${YARN_CLIENT_OPTS}" ]]; then
+        hadoop_error "WARNING: Use \"yarn jar\" to launch YARN applications."
+      fi
+      HADOOP_CLASSNAME=org.apache.hadoop.util.RunJar
+    ;;
+    jnipath)
+      hadoop_finalize
+      echo "${JAVA_LIBRARY_PATH}"
+      exit 0
+    ;;
+    kerbname)
+      HADOOP_CLASSNAME=org.apache.hadoop.security.HadoopKerberosName
+    ;;
+    key)
+      HADOOP_CLASSNAME=org.apache.hadoop.crypto.key.KeyShell
+    ;;
+    trace)
+      HADOOP_CLASSNAME=org.apache.hadoop.tracing.TraceAdmin
+    ;;
+    version)
+      HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
+    ;;
+    *)
+      HADOOP_CLASSNAME="${subcmd}"
+      if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then
+        hadoop_exit_with_usage 1
+      fi
+    ;;
+  esac
+
+  # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 }
 
 # This script runs the hadoop core commands.
@@ -69,137 +191,76 @@ if [ $# = 0 ]; then
   hadoop_exit_with_usage 1
 fi
 
-COMMAND=$1
+HADOOP_SUBCMD=$1
 shift
 
-case ${COMMAND} in
-  balancer|datanode|dfs|dfsadmin|dfsgroups|  \
-  namenode|secondarynamenode|fsck|fetchdt|oiv| \
-  portmap|nfs3)
-    hadoop_error "WARNING: Use of this script to execute ${COMMAND} is deprecated."
-    COMMAND=${COMMAND/dfsgroups/groups}
-    hadoop_error "WARNING: Attempting to execute replacement \"hdfs ${COMMAND}\" instead."
-    hadoop_error ""
-    #try to locate hdfs and if present, delegate to it.
-    if [[ -f "${HADOOP_HDFS_HOME}/bin/hdfs" ]]; then
-      # shellcheck disable=SC2086
-      exec "${HADOOP_HDFS_HOME}/bin/hdfs" \
-      --config "${HADOOP_CONF_DIR}" "${COMMAND}"  "$@"
-    elif [[ -f "${HADOOP_HOME}/bin/hdfs" ]]; then
-      # shellcheck disable=SC2086
-      exec "${HADOOP_HOME}/bin/hdfs" \
-      --config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
-    else
-      hadoop_error "HADOOP_HDFS_HOME not found!"
-      exit 1
-    fi
-  ;;
-
-  #mapred commands for backwards compatibility
-  pipes|job|queue|mrgroups|mradmin|jobtracker|tasktracker)
-    hadoop_error "WARNING: Use of this script to execute ${COMMAND} is deprecated."
-    COMMAND=${COMMAND/mrgroups/groups}
-    hadoop_error "WARNING: Attempting to execute replacement \"mapred ${COMMAND}\" instead."
-    hadoop_error ""
-    #try to locate mapred and if present, delegate to it.
-    if [[ -f "${HADOOP_MAPRED_HOME}/bin/mapred" ]]; then
-      exec "${HADOOP_MAPRED_HOME}/bin/mapred" \
-      --config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
-    elif [[ -f "${HADOOP_HOME}/bin/mapred" ]]; then
-      exec "${HADOOP_HOME}/bin/mapred" \
-      --config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
-    else
-      hadoop_error "HADOOP_MAPRED_HOME not found!"
-      exit 1
-    fi
-  ;;
-  archive)
-    CLASS=org.apache.hadoop.tools.HadoopArchives
-    hadoop_add_to_classpath_tools hadoop-archives
-  ;;
-  checknative)
-    CLASS=org.apache.hadoop.util.NativeLibraryChecker
-  ;;
-  classpath)
-    hadoop_do_classpath_subcommand CLASS "$@"
-  ;;
-  conftest)
-    CLASS=org.apache.hadoop.util.ConfTest
-  ;;
-  credential)
-    CLASS=org.apache.hadoop.security.alias.CredentialShell
-  ;;
-  daemonlog)
-    CLASS=org.apache.hadoop.log.LogLevel
-  ;;
-  distch)
-    CLASS=org.apache.hadoop.tools.DistCh
-    hadoop_add_to_classpath_tools hadoop-extras
-  ;;
-  distcp)
-    CLASS=org.apache.hadoop.tools.DistCp
-    hadoop_add_to_classpath_tools hadoop-distcp
-  ;;
-  dtutil)
-    CLASS=org.apache.hadoop.security.token.DtUtilShell
-  ;;
-  envvars)
-    echo "JAVA_HOME='${JAVA_HOME}'"
-    echo "HADOOP_COMMON_HOME='${HADOOP_COMMON_HOME}'"
-    echo "HADOOP_COMMON_DIR='${HADOOP_COMMON_DIR}'"
-    echo "HADOOP_COMMON_LIB_JARS_DIR='${HADOOP_COMMON_LIB_JARS_DIR}'"
-    echo "HADOOP_COMMON_LIB_NATIVE_DIR='${HADOOP_COMMON_LIB_NATIVE_DIR}'"
-    echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
-    echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
-    echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
-    echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
-    exit 0
-  ;;
-  fs)
-    CLASS=org.apache.hadoop.fs.FsShell
-  ;;
-  jar)
-    if [[ -n "${YARN_OPTS}" ]] || [[ -n "${YARN_CLIENT_OPTS}" ]]; then
-      hadoop_error "WARNING: Use \"yarn jar\" to launch YARN applications."
-    fi
-    CLASS=org.apache.hadoop.util.RunJar
-  ;;
-  jnipath)
-    hadoop_finalize
-    echo "${JAVA_LIBRARY_PATH}"
-    exit 0
-  ;;
-  kerbname)
-    CLASS=org.apache.hadoop.security.HadoopKerberosName
-  ;;
-  key)
-    CLASS=org.apache.hadoop.crypto.key.KeyShell
-  ;;
-  trace)
-    CLASS=org.apache.hadoop.tracing.TraceAdmin
-  ;;
-  version)
-    CLASS=org.apache.hadoop.util.VersionInfo
-  ;;
-  *)
-    CLASS="${COMMAND}"
-    if ! hadoop_validate_classname "${CLASS}"; then
-      hadoop_exit_with_usage 1
-    fi
-  ;;
-esac
-
-hadoop_verify_user "${COMMAND}"
+HADOOP_SUBCMD_ARGS=("$@")
+
+if declare -f hadoop_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
+  hadoop_debug "Calling dynamically: hadoop_subcommand_${HADOOP_SUBCMD} ${HADOOP_SUBCMD_ARGS[*]}"
+  "hadoop_subcommand_${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
+else
+  hadoopcmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
+fi
+
+hadoop_verify_user "${HADOOP_SUBCMD}"
 
 if [[ ${HADOOP_SLAVE_MODE} = true ]]; then
-  hadoop_common_slave_mode_execute "${HADOOP_HDFS_HOME}/bin/hdfs" "${HADOOP_USER_PARAMS[@]}"
+  hadoop_common_slave_mode_execute "${HADOOP_COMMON_HOME}/bin/hadoop" "${HADOOP_USER_PARAMS[@]}"
   exit $?
 fi
 
-# Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
-hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
+  HADOOP_SECURE_USER="${HADOOP_SUBCMD_SECUREUSER}"
+  hadoop_verify_secure_prereq
+  hadoop_setup_secure_service
+  priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
+  priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.err"
+  priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
+  daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
+  daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
+else
+  daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
+  daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
+fi
+
+if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
+  # shellcheck disable=SC2034
+  HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
+  if [[ -n "${HADOOP_SUBCMD_SECURESERVICE}" ]]; then
+    # shellcheck disable=SC2034
+    HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
+  else
+    # shellcheck disable=SC2034
+    HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
+  fi
+fi
 
 hadoop_finalize
-hadoop_java_exec "${COMMAND}" "${CLASS}" "$@"
 
+if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = true ]]; then
+  if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
+    hadoop_secure_daemon_handler \
+      "${HADOOP_DAEMON_MODE}" \
+      "${HADOOP_SUBCMD}" \
+      "${HADOOP_CLASSNAME}" \
+      "${daemon_pidfile}" \
+      "${daemon_outfile}" \
+      "${priv_pidfile}" \
+      "${priv_outfile}" \
+      "${priv_errfile}" \
+      "${HADOOP_SUBCMD_ARGS[@]}"
+  else
+    hadoop_daemon_handler \
+      "${HADOOP_DAEMON_MODE}" \
+      "${HADOOP_SUBCMD}" \
+      "${HADOOP_CLASSNAME}" \
+      "${daemon_pidfile}" \
+      "${daemon_outfile}" \
+      "${HADOOP_SUBCMD_ARGS[@]}"
+  fi
+  exit $?
+else
+  # shellcheck disable=SC2086
+  hadoop_java_exec "${HADOOP_SUBCMD}" "${HADOOP_CLASSNAME}" "${HADOOP_SUBCMD_ARGS[@]}"
+fi

+ 23 - 0
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

@@ -2082,3 +2082,26 @@ function hadoop_parse_args
 
   hadoop_debug "hadoop_parse: asking caller to skip ${HADOOP_PARSE_COUNTER}"
 }
+
+## @description  XML-escapes the characters (&'"<>) in the given parameter.
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        string
+## @return       XML-escaped string
+function hadoop_xml_escape
+{
+  sed -e 's/&/\&amp;/g' -e 's/"/\\\&quot;/g' \
+    -e "s/'/\\\\\&apos;/g" -e 's/</\\\&lt;/g' -e 's/>/\\\&gt;/g' <<< "$1"
+}
+
+## @description  sed-escapes the characters (\/&) in the given parameter.
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        string
+## @return       sed-escaped string
+function hadoop_sed_escape
+{
+  sed -e 's/[\/&]/\\&/g' <<< "$1"
+}

+ 21 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -2536,6 +2536,27 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     return result.entrySet().iterator();
   }
 
+  /**
+   * Constructs a mapping of configuration and includes all properties that
+   * start with the specified configuration prefix.  Property names in the
+   * mapping are trimmed to remove the configuration prefix.
+   *
+   * @param confPrefix configuration prefix
+   * @return mapping of configuration properties with prefix stripped
+   */
+  public Map<String, String> getPropsWithPrefix(String confPrefix) {
+    Map<String, String> configMap = new HashMap<>();
+    for (Map.Entry<String, String> entry : this) {
+      String name = entry.getKey();
+      if (name.startsWith(confPrefix)) {
+        String value = this.get(name);
+        name = name.substring(confPrefix.length());
+        configMap.put(name, value);
+      }
+    }
+    return configMap;
+  }
+
   private Document parse(DocumentBuilder builder, URL url)
       throws IOException, SAXException {
     if (!quietmode) {

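For reference, a usage sketch of the new getPropsWithPrefix() helper added above; apart from fs.client.resolve.topology.enabled (introduced in this commit), the property names are illustrative only:

    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;

    public class PropsWithPrefixExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("fs.client.resolve.topology.enabled", "true");
        conf.set("fs.client.example.setting", "42");   // illustrative key
        conf.set("io.file.buffer.size", "4096");       // different prefix, not returned

        // The prefix is stripped from the returned keys, so this prints
        // "resolve.topology.enabled = true" and "example.setting = 42".
        Map<String, String> clientProps = conf.getPropsWithPrefix("fs.client.");
        for (Map.Entry<String, String> e : clientProps.entrySet()) {
          System.out.println(e.getKey() + " = " + e.getValue());
        }
      }
    }
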
+ 47 - 42
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java

@@ -19,7 +19,6 @@
 package org.apache.hadoop.crypto.key;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -38,12 +37,11 @@ import com.google.common.annotations.VisibleForTesting;
 import javax.crypto.spec.SecretKeySpec;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 import java.io.Serializable;
 import java.net.URI;
-import java.net.URL;
+import java.security.GeneralSecurityException;
 import java.security.Key;
 import java.security.KeyStore;
 import java.security.KeyStoreException;
@@ -88,7 +86,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 @InterfaceAudience.Private
 public class JavaKeyStoreProvider extends KeyProvider {
   private static final String KEY_METADATA = "KeyMetadata";
-  private static Logger LOG =
+  private static final Logger LOG =
       LoggerFactory.getLogger(JavaKeyStoreProvider.class);
 
   public static final String SCHEME_NAME = "jceks";
@@ -103,8 +101,8 @@ public class JavaKeyStoreProvider extends KeyProvider {
   private final URI uri;
   private final Path path;
   private final FileSystem fs;
-  private final FsPermission permissions;
-  private final KeyStore keyStore;
+  private FsPermission permissions;
+  private KeyStore keyStore;
   private char[] password;
   private boolean changed = false;
   private Lock readLock;
@@ -131,29 +129,24 @@ public class JavaKeyStoreProvider extends KeyProvider {
     this.uri = uri;
     path = ProviderUtils.unnestUri(uri);
     fs = path.getFileSystem(conf);
-    // Get the password file from the conf, if not present from the user's
-    // environment var
-    if (System.getenv().containsKey(KEYSTORE_PASSWORD_ENV_VAR)) {
-      password = System.getenv(KEYSTORE_PASSWORD_ENV_VAR).toCharArray();
-    }
-    if (password == null) {
-      String pwFile = conf.get(KEYSTORE_PASSWORD_FILE_KEY);
-      if (pwFile != null) {
-        ClassLoader cl = Thread.currentThread().getContextClassLoader();
-        URL pwdFile = cl.getResource(pwFile);
-        if (pwdFile == null) {
-          // Provided Password file does not exist
-          throw new IOException("Password file does not exists");
-        }
-        try (InputStream is = pwdFile.openStream()) {
-          password = IOUtils.toString(is).trim().toCharArray();
-        }
-      }
-    }
-    if (password == null) {
-      password = KEYSTORE_PASSWORD_DEFAULT;
-    }
+    locateKeystore();
+    ReadWriteLock lock = new ReentrantReadWriteLock(true);
+    readLock = lock.readLock();
+    writeLock = lock.writeLock();
+  }
+
+  /**
+   * Open up and initialize the keyStore.
+   * @throws IOException If there is a problem reading the password file
+   * or a problem reading the keystore.
+   */
+  private void locateKeystore() throws IOException {
     try {
+      password = ProviderUtils.locatePassword(KEYSTORE_PASSWORD_ENV_VAR,
+          getConf().get(KEYSTORE_PASSWORD_FILE_KEY));
+      if (password == null) {
+        password = KEYSTORE_PASSWORD_DEFAULT;
+      }
       Path oldPath = constructOldPath(path);
       Path newPath = constructNewPath(path);
       keyStore = KeyStore.getInstance(SCHEME_NAME);
@@ -175,19 +168,14 @@ public class JavaKeyStoreProvider extends KeyProvider {
       permissions = perm;
     } catch (KeyStoreException e) {
       throw new IOException("Can't create keystore", e);
-    } catch (NoSuchAlgorithmException e) {
-      throw new IOException("Can't load keystore " + path, e);
-    } catch (CertificateException e) {
+    } catch (GeneralSecurityException e) {
       throw new IOException("Can't load keystore " + path, e);
     }
-    ReadWriteLock lock = new ReentrantReadWriteLock(true);
-    readLock = lock.readLock();
-    writeLock = lock.writeLock();
   }
 
   /**
    * Try loading from the user specified path, else load from the backup
-   * path in case Exception is not due to bad/wrong password
+   * path in case Exception is not due to bad/wrong password.
    * @param path Actual path to load from
    * @param backupPath Backup path (_OLD)
    * @return The permissions of the loaded file
@@ -256,7 +244,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
     if (perm == null) {
       keyStore.load(null, password);
       LOG.debug("KeyStore initialized anew successfully !!");
-      perm = new FsPermission("700");
+      perm = new FsPermission("600");
     }
     return perm;
   }
@@ -311,14 +299,31 @@ public class JavaKeyStoreProvider extends KeyProvider {
     }
   }
 
-  private Path constructNewPath(Path path) {
-    Path newPath = new Path(path.toString() + "_NEW");
-    return newPath;
+  private static Path constructNewPath(Path path) {
+    return new Path(path.toString() + "_NEW");
+  }
+
+  private static Path constructOldPath(Path path) {
+    return new Path(path.toString() + "_OLD");
   }
 
-  private Path constructOldPath(Path path) {
-    Path oldPath = new Path(path.toString() + "_OLD");
-    return oldPath;
+  @Override
+  public boolean needsPassword() throws IOException {
+    return (null == ProviderUtils.locatePassword(KEYSTORE_PASSWORD_ENV_VAR,
+        getConf().get(KEYSTORE_PASSWORD_FILE_KEY)));
+
+  }
+
+  @Override
+  public String noPasswordWarning() {
+    return ProviderUtils.noPasswordWarning(KEYSTORE_PASSWORD_ENV_VAR,
+        KEYSTORE_PASSWORD_FILE_KEY);
+  }
+
+  @Override
+  public String noPasswordError() {
+    return ProviderUtils.noPasswordError(KEYSTORE_PASSWORD_ENV_VAR,
+        KEYSTORE_PASSWORD_FILE_KEY);
   }
 
   @Override

+ 32 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java

@@ -607,4 +607,36 @@ public abstract class KeyProvider {
     }
     throw new IOException("Can't find KeyProvider for key " + keyName);
   }
+
+  /**
+   * Does this provider require a password? This means that a password is
+   * required for normal operation, and it has not been found through normal
+   * means. If true, the password should be provided by the caller using
+   * setPassword().
+   * @return Whether or not the provider requires a password
+   * @throws IOException
+   */
+  public boolean needsPassword() throws IOException {
+    return false;
+  }
+
+  /**
+   * If a password for the provider is needed, but is not provided, this will
+   * return a warning and instructions for supplying said password to the
+   * provider.
+   * @return A warning and instructions for supplying the password
+   */
+  public String noPasswordWarning() {
+    return null;
+  }
+
+  /**
+   * If a password for the provider is needed, but is not provided, this will
+   * return an error message and instructions for supplying said password to
+   * the provider.
+   * @return An error message and instructions for supplying the password
+   */
+  public String noPasswordError() {
+    return null;
+  }
 }

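The three hooks above default to "no password required"; keystore-backed providers such as JavaKeyStoreProvider override them. A simplified sketch of the intended caller pattern, mirroring the KeyShell -strict handling further down (the helper class here is invented for illustration):

    import java.io.IOException;

    import org.apache.hadoop.crypto.key.KeyProvider;

    final class ProviderPasswordCheck {
      private ProviderPasswordCheck() {
      }

      /**
       * Returns false if the provider needs a password and strict mode is on;
       * otherwise prints the provider's warning (if any) and continues.
       */
      static boolean validate(KeyProvider provider, boolean strict)
          throws IOException {
        if (provider.needsPassword()) {
          if (strict) {
            System.err.println(provider.noPasswordError());
            return false;
          }
          System.out.println(provider.noPasswordWarning());
        }
        return true;
      }
    }
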
+ 75 - 52
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java

@@ -26,6 +26,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.crypto.key.KeyProvider.Metadata;
@@ -46,13 +47,24 @@ public class KeyShell extends Configured implements Tool {
       "   [" + DeleteCommand.USAGE + "]\n" +
       "   [" + ListCommand.USAGE + "]\n";
   private static final String LIST_METADATA = "keyShell.list.metadata";
+  @VisibleForTesting
+  public static final String NO_VALID_PROVIDERS =
+      "There are no valid (non-transient) providers configured.\n" +
+      "No action has been taken. Use the -provider option to specify\n" +
+      "a provider. If you want to use a transient provider then you\n" +
+      "MUST use the -provider argument.";
 
   private boolean interactive = true;
   private Command command = null;
 
-  /** allows stdout to be captured if necessary */
+  /** If true, fail if the provider requires a password and none is given. */
+  private boolean strict = false;
+
+  /** allows stdout to be captured if necessary. */
+  @VisibleForTesting
   public PrintStream out = System.out;
-  /** allows stderr to be captured if necessary */
+  /** allows stderr to be captured if necessary. */
+  @VisibleForTesting
   public PrintStream err = System.err;
 
   private boolean userSuppliedProvider = false;
@@ -76,7 +88,7 @@ public class KeyShell extends Configured implements Tool {
         return exitCode;
       }
       if (command.validate()) {
-          command.execute();
+        command.execute();
       } else {
         exitCode = 1;
       }
@@ -88,7 +100,7 @@ public class KeyShell extends Configured implements Tool {
   }
 
   /**
-   * Parse the command line arguments and initialize the data
+   * Parse the command line arguments and initialize the data.
    * <pre>
    * % hadoop key create keyName [-size size] [-cipher algorithm]
    *    [-provider providerPath]
@@ -171,6 +183,8 @@ public class KeyShell extends Configured implements Tool {
         getConf().setBoolean(LIST_METADATA, true);
       } else if ("-f".equals(args[i]) || ("-force".equals(args[i]))) {
         interactive = false;
+      } else if (args[i].equals("-strict")) {
+        strict = true;
       } else if ("-help".equals(args[i])) {
         printKeyShellUsage();
         return 1;
@@ -199,7 +213,7 @@ public class KeyShell extends Configured implements Tool {
       out.println(command.getUsage());
     } else {
       out.println("=========================================================" +
-      		"======");
+          "======");
       out.println(CreateCommand.USAGE + ":\n\n" + CreateCommand.DESC);
       out.println("=========================================================" +
           "======");
@@ -221,16 +235,16 @@ public class KeyShell extends Configured implements Tool {
     }
 
     protected KeyProvider getKeyProvider() {
-      KeyProvider provider = null;
+      KeyProvider prov = null;
       List<KeyProvider> providers;
       try {
         providers = KeyProviderFactory.getProviders(getConf());
         if (userSuppliedProvider) {
-          provider = providers.get(0);
+          prov = providers.get(0);
         } else {
           for (KeyProvider p : providers) {
             if (!p.isTransient()) {
-              provider = p;
+              prov = p;
               break;
             }
           }
@@ -238,11 +252,14 @@ public class KeyShell extends Configured implements Tool {
       } catch (IOException e) {
         e.printStackTrace(err);
       }
-      return provider;
+      if (prov == null) {
+        out.println(NO_VALID_PROVIDERS);
+      }
+      return prov;
     }
 
     protected void printProviderWritten() {
-        out.println(provider + " has been updated.");
+      out.println(provider + " has been updated.");
     }
 
     protected void warnIfTransientProvider() {
@@ -258,12 +275,13 @@ public class KeyShell extends Configured implements Tool {
 
   private class ListCommand extends Command {
     public static final String USAGE =
-        "list [-provider <provider>] [-metadata] [-help]";
+        "list [-provider <provider>] [-strict] [-metadata] [-help]";
     public static final String DESC =
         "The list subcommand displays the keynames contained within\n" +
         "a particular provider as configured in core-site.xml or\n" +
         "specified with the -provider argument. -metadata displays\n" +
-        "the metadata.";
+        "the metadata. If -strict is supplied, fail immediately if\n" +
+        "the provider requires a password and none is given.";
 
     private boolean metadata = false;
 
@@ -271,10 +289,6 @@ public class KeyShell extends Configured implements Tool {
       boolean rc = true;
       provider = getKeyProvider();
       if (provider == null) {
-        out.println("There are no non-transient KeyProviders configured.\n"
-          + "Use the -provider option to specify a provider. If you\n"
-          + "want to list a transient provider then you must use the\n"
-          + "-provider argument.");
         rc = false;
       }
       metadata = getConf().getBoolean(LIST_METADATA, false);
@@ -310,12 +324,15 @@ public class KeyShell extends Configured implements Tool {
   }
 
   private class RollCommand extends Command {
-    public static final String USAGE = "roll <keyname> [-provider <provider>] [-help]";
+    public static final String USAGE =
+        "roll <keyname> [-provider <provider>] [-strict] [-help]";
     public static final String DESC =
-      "The roll subcommand creates a new version for the specified key\n" +
-      "within the provider indicated using the -provider argument\n";
+        "The roll subcommand creates a new version for the specified key\n" +
+        "within the provider indicated using the -provider argument.\n" +
+        "If -strict is supplied, fail immediately if the provider requires\n" +
+        "a password and none is given.";
 
-    String keyName = null;
+    private String keyName = null;
 
     public RollCommand(String keyName) {
       this.keyName = keyName;
@@ -325,14 +342,11 @@ public class KeyShell extends Configured implements Tool {
       boolean rc = true;
       provider = getKeyProvider();
       if (provider == null) {
-        out.println("There are no valid KeyProviders configured. The key\n" +
-          "has not been rolled. Use the -provider option to specify\n" +
-          "a provider.");
         rc = false;
       }
       if (keyName == null) {
         out.println("Please provide a <keyname>.\n" +
-          "See the usage description by using -help.");
+            "See the usage description by using -help.");
         rc = false;
       }
       return rc;
@@ -368,15 +382,17 @@ public class KeyShell extends Configured implements Tool {
 
   private class DeleteCommand extends Command {
     public static final String USAGE =
-        "delete <keyname> [-provider <provider>] [-f] [-help]";
+        "delete <keyname> [-provider <provider>] [-strict] [-f] [-help]";
     public static final String DESC =
         "The delete subcommand deletes all versions of the key\n" +
         "specified by the <keyname> argument from within the\n" +
         "provider specified by -provider. The command asks for\n" +
-        "user confirmation unless -f is specified.";
+        "user confirmation unless -f is specified. If -strict is\n" +
+        "supplied, fail immediately if the provider requires a\n" +
+        "password and none is given.";
 
-    String keyName = null;
-    boolean cont = true;
+    private String keyName = null;
+    private boolean cont = true;
 
     public DeleteCommand(String keyName) {
       this.keyName = keyName;
@@ -386,8 +402,6 @@ public class KeyShell extends Configured implements Tool {
     public boolean validate() {
       provider = getKeyProvider();
       if (provider == null) {
-        out.println("There are no valid KeyProviders configured. Nothing\n"
-          + "was deleted. Use the -provider option to specify a provider.");
         return false;
       }
       if (keyName == null) {
@@ -438,22 +452,23 @@ public class KeyShell extends Configured implements Tool {
 
   private class CreateCommand extends Command {
     public static final String USAGE =
-      "create <keyname> [-cipher <cipher>] [-size <size>]\n" +
-      "                     [-description <description>]\n" +
-      "                     [-attr <attribute=value>]\n" +
-      "                     [-provider <provider>] [-help]";
+        "create <keyname> [-cipher <cipher>] [-size <size>]\n" +
+        "                     [-description <description>]\n" +
+        "                     [-attr <attribute=value>]\n" +
+        "                     [-provider <provider>] [-strict]\n" +
+        "                     [-help]";
     public static final String DESC =
-      "The create subcommand creates a new key for the name specified\n" +
-      "by the <keyname> argument within the provider specified by the\n" +
-      "-provider argument. You may specify a cipher with the -cipher\n" +
-      "argument. The default cipher is currently \"AES/CTR/NoPadding\".\n" +
-      "The default keysize is 128. You may specify the requested key\n" +
-      "length using the -size argument. Arbitrary attribute=value\n" +
-      "style attributes may be specified using the -attr argument.\n" +
-      "-attr may be specified multiple times, once per attribute.\n";
-
-    final String keyName;
-    final Options options;
+        "The create subcommand creates a new key for the name specified\n" +
+        "by the <keyname> argument within the provider specified by the\n" +
+        "-provider argument. You may specify a cipher with the -cipher\n" +
+        "argument. The default cipher is currently \"AES/CTR/NoPadding\".\n" +
+        "The default keysize is 128. You may specify the requested key\n" +
+        "length using the -size argument. Arbitrary attribute=value\n" +
+        "style attributes may be specified using the -attr argument.\n" +
+        "-attr may be specified multiple times, once per attribute.\n";
+
+    private final String keyName;
+    private final Options options;
 
     public CreateCommand(String keyName, Options options) {
       this.keyName = keyName;
@@ -462,16 +477,24 @@ public class KeyShell extends Configured implements Tool {
 
     public boolean validate() {
       boolean rc = true;
-      provider = getKeyProvider();
-      if (provider == null) {
-        out.println("There are no valid KeyProviders configured. No key\n" +
-          " was created. You can use the -provider option to specify\n" +
-          " a provider to use.");
-        rc = false;
+      try {
+        provider = getKeyProvider();
+        if (provider == null) {
+          rc = false;
+        } else if (provider.needsPassword()) {
+          if (strict) {
+            out.println(provider.noPasswordError());
+            rc = false;
+          } else {
+            out.println(provider.noPasswordWarning());
+          }
+        }
+      } catch (IOException e) {
+        e.printStackTrace(err);
       }
       if (keyName == null) {
         out.println("Please provide a <keyname>. See the usage description" +
-          " with -help.");
+            " with -help.");
         rc = false;
       }
       return rc;
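
The -strict flag added above turns a password-less provider into a hard failure in validate() rather than only a warning. A minimal sketch of exercising the new flag programmatically, assuming a hypothetical JCEKS keystore path (jceks://file/tmp/test.jceks) and key name; the shell is driven through ToolRunner exactly as the hadoop key CLI would drive it:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyShell;
import org.apache.hadoop.util.ToolRunner;

public class KeyShellStrictSketch {
  public static void main(String[] args) throws Exception {
    String[] shellArgs = {
        "create", "mykey",                            // hypothetical key name
        "-size", "256",
        "-provider", "jceks://file/tmp/test.jceks",   // hypothetical keystore path
        "-strict"                                     // fail, rather than warn, if the provider needs a password
    };
    // ToolRunner parses generic options and calls KeyShell#run; a non-zero
    // return code indicates that validate() or the create itself failed.
    int rc = ToolRunner.run(new Configuration(), new KeyShell(), shellArgs);
    System.exit(rc);
  }
}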

+ 6 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -111,7 +111,12 @@ public class CommonConfigurationKeysPublic {
   public static final String  FS_TRASH_INTERVAL_KEY = "fs.trash.interval";
   /** Default value for FS_TRASH_INTERVAL_KEY */
   public static final long    FS_TRASH_INTERVAL_DEFAULT = 0;
-
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a>. */
+  public static final String  FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED =
+      "fs.client.resolve.topology.enabled";
+  /** Default value for FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED. */
+  public static final boolean FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED_DEFAULT =
+      false;
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  IO_MAPFILE_BLOOM_SIZE_KEY =
     "io.mapfile.bloom.size";

+ 43 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java

@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.util.Collections;
+import java.util.Iterator;
+
+/**
+ * EmptyStorageStatistics is a StorageStatistics implementation which has no
+ * data.
+ */
+class EmptyStorageStatistics extends StorageStatistics {
+  EmptyStorageStatistics(String name) {
+    super(name);
+  }
+
+  public Iterator<LongStatistic> getLongStatistics() {
+    return Collections.emptyIterator();
+  }
+
+  public Long getLong(String key) {
+    return null;
+  }
+
+  public boolean isTracked(String key) {
+    return false;
+  }
+}

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java

@@ -234,4 +234,13 @@ public class FSDataInputStream extends DataInputStream
           "support unbuffering.");
     }
   }
+
+  /**
+   * String value. Includes the string value of the inner stream.
+   * @return a string value that includes the wrapped stream
+   */
+  @Override
+  public String toString() {
+    return super.toString() + ": " + in;
+  }
 }

+ 51 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -49,6 +49,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -74,6 +75,7 @@ import org.apache.htrace.core.TraceScope;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import static com.google.common.base.Preconditions.checkArgument;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
 
 /****************************************************************
@@ -210,7 +212,13 @@ public abstract class FileSystem extends Configured implements Closeable {
    * @param conf the configuration
    */
   public void initialize(URI name, Configuration conf) throws IOException {
-    statistics = getStatistics(name.getScheme(), getClass());    
+    final String scheme;
+    if (name.getScheme() == null || name.getScheme().isEmpty()) {
+      scheme = getDefaultUri(conf).getScheme();
+    } else {
+      scheme = name.getScheme();
+    }
+    statistics = getStatistics(scheme, getClass());
     resolveSymlinks = conf.getBoolean(
         CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY,
         CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_DEFAULT);
@@ -3560,7 +3568,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   /**
    * Get the Map of Statistics object indexed by URI Scheme.
    * @return a Map having a key as URI scheme and value as Statistics object
-   * @deprecated use {@link #getAllStatistics} instead
+   * @deprecated use {@link #getGlobalStorageStatistics()}
    */
   @Deprecated
   public static synchronized Map<String, Statistics> getStatistics() {
@@ -3572,8 +3580,10 @@ public abstract class FileSystem extends Configured implements Closeable {
   }
 
   /**
-   * Return the FileSystem classes that have Statistics
+   * Return the FileSystem classes that have Statistics.
+   * @deprecated use {@link #getGlobalStorageStatistics()}
    */
+  @Deprecated
   public static synchronized List<Statistics> getAllStatistics() {
     return new ArrayList<Statistics>(statisticsTable.values());
   }
@@ -3582,13 +3592,25 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Get the statistics for a particular file system
    * @param cls the class to lookup
    * @return a statistics object
+   * @deprecated use {@link #getGlobalStorageStatistics()}
    */
-  public static synchronized 
-  Statistics getStatistics(String scheme, Class<? extends FileSystem> cls) {
+  @Deprecated
+  public static synchronized Statistics getStatistics(final String scheme,
+      Class<? extends FileSystem> cls) {
+    checkArgument(scheme != null,
+        "No statistics is allowed for a file system with null scheme!");
     Statistics result = statisticsTable.get(cls);
     if (result == null) {
-      result = new Statistics(scheme);
-      statisticsTable.put(cls, result);
+      final Statistics newStats = new Statistics(scheme);
+      statisticsTable.put(cls, newStats);
+      result = newStats;
+      GlobalStorageStatistics.INSTANCE.put(scheme,
+          new StorageStatisticsProvider() {
+            @Override
+            public StorageStatistics provide() {
+              return new FileSystemStorageStatistics(scheme, newStats);
+            }
+          });
     }
     return result;
   }
@@ -3628,4 +3650,26 @@ public abstract class FileSystem extends Configured implements Closeable {
   public static void enableSymlinks() {
     symlinksEnabled = true;
   }
+
+  /**
+   * Get the StorageStatistics for this FileSystem object.  These statistics are
+   * per-instance.  They are not shared with any other FileSystem object.
+   *
+   * <p>This is a default method which is intended to be overridden by
+   * subclasses. The default implementation returns an empty storage statistics
+   * object.</p>
+   *
+   * @return    The StorageStatistics for this FileSystem instance.
+   *            Will never be null.
+   */
+  public StorageStatistics getStorageStatistics() {
+    return new EmptyStorageStatistics(getUri().toString());
+  }
+
+  /**
+   * Get the global storage statistics.
+   */
+  public static GlobalStorageStatistics getGlobalStorageStatistics() {
+    return GlobalStorageStatistics.INSTANCE;
+  }
 }

+ 136 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java

@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileSystem.Statistics.StatisticsData;
+
+/**
+ * A basic StorageStatistics instance which simply returns data from
+ * FileSystem#Statistics.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class FileSystemStorageStatistics extends StorageStatistics {
+  /**
+   * The per-class FileSystem statistics.
+   */
+  private final FileSystem.Statistics stats;
+
+  private static final String[] KEYS = {
+      "bytesRead",
+      "bytesWritten",
+      "readOps",
+      "largeReadOps",
+      "writeOps",
+      "bytesReadLocalHost",
+      "bytesReadDistanceOfOneOrTwo",
+      "bytesReadDistanceOfThreeOrFour",
+      "bytesReadDistanceOfFiveOrLarger"
+  };
+
+  private static class LongStatisticIterator
+      implements Iterator<LongStatistic> {
+    private final StatisticsData data;
+
+    private int keyIdx;
+
+    LongStatisticIterator(StatisticsData data) {
+      this.data = data;
+      this.keyIdx = 0;
+    }
+
+    @Override
+    public boolean hasNext() {
+      return (this.keyIdx < KEYS.length);
+    }
+
+    @Override
+    public LongStatistic next() {
+      if (this.keyIdx >= KEYS.length) {
+        throw new NoSuchElementException();
+      }
+      String key = KEYS[this.keyIdx++];
+      Long val = fetch(data, key);
+      return new LongStatistic(key, val.longValue());
+    }
+
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException();
+    }
+  }
+
+  private static Long fetch(StatisticsData data, String key) {
+    switch (key) {
+    case "bytesRead":
+      return data.getBytesRead();
+    case "bytesWritten":
+      return data.getBytesWritten();
+    case "readOps":
+      return Long.valueOf(data.getReadOps());
+    case "largeReadOps":
+      return Long.valueOf(data.getLargeReadOps());
+    case "writeOps":
+      return Long.valueOf(data.getWriteOps());
+    case "bytesReadLocalHost":
+      return data.getBytesReadLocalHost();
+    case "bytesReadDistanceOfOneOrTwo":
+      return data.getBytesReadDistanceOfOneOrTwo();
+    case "bytesReadDistanceOfThreeOrFour":
+      return data.getBytesReadDistanceOfThreeOrFour();
+    case "bytesReadDistanceOfFiveOrLarger":
+      return data.getBytesReadDistanceOfFiveOrLarger();
+    default:
+      return null;
+    }
+  }
+
+  FileSystemStorageStatistics(String name, FileSystem.Statistics stats) {
+    super(name);
+    this.stats = stats;
+  }
+
+  @Override
+  public Iterator<LongStatistic> getLongStatistics() {
+    return new LongStatisticIterator(stats.getData());
+  }
+
+  @Override
+  public Long getLong(String key) {
+    return fetch(stats.getData(), key);
+  }
+
+  /**
+   * Return true if a statistic is being tracked.
+   *
+   * @return         True only if the statistic is being tracked.
+   */
+  public boolean isTracked(String key) {
+    for (String k: KEYS) {
+      if (k.equals(key)) {
+        return true;
+      }
+    }
+    return false;
+  }
+}
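
A short sketch of how these per-scheme statistics become reachable once a FileSystem has been instantiated; the "file" scheme and the "readOps" key are the only assumptions beyond the code above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.StorageStatistics;

public class PerSchemeStatsSketch {
  public static void main(String[] args) throws Exception {
    // Creating a FileSystem registers a FileSystemStorageStatistics object
    // under its scheme ("file" for the local file system) in the global map.
    FileSystem.getLocal(new Configuration());

    StorageStatistics stats =
        FileSystem.getGlobalStorageStatistics().get("file");
    if (stats != null) {
      // "readOps" is one of the KEYS above; getLong returns null for any
      // key that is not tracked.
      System.out.println("file readOps = " + stats.getLong("readOps"));
    }
  }
}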

+ 11 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java

@@ -50,12 +50,12 @@ public class FsShell extends Configured implements Tool {
 
   private FileSystem fs;
   private Trash trash;
+  private Help help;
   protected CommandFactory commandFactory;
 
   private final String usagePrefix =
     "Usage: hadoop fs [generic options]";
 
-  private Tracer tracer;
   static final String SHELL_HTRACE_PREFIX = "fs.shell.htrace.";
 
   /**
@@ -89,6 +89,13 @@ public class FsShell extends Configured implements Tool {
     }
     return this.trash;
   }
+
+  protected Help getHelp() throws IOException {
+    if (this.help == null){
+      this.help = new Help();
+    }
+    return this.help;
+  }
   
   protected void init() throws IOException {
     getConf().setQuietMode(true);
@@ -98,9 +105,6 @@ public class FsShell extends Configured implements Tool {
       commandFactory.addObject(new Usage(), "-usage");
       registerCommands(commandFactory);
     }
-    this.tracer = new Tracer.Builder("FsShell").
-        conf(TraceUtils.wrapHadoopConf(SHELL_HTRACE_PREFIX, getConf())).
-        build();
   }
 
   protected void registerCommands(CommandFactory factory) {
@@ -296,6 +300,9 @@ public class FsShell extends Configured implements Tool {
   public int run(String argv[]) throws Exception {
     // initialize FsShell
     init();
+    Tracer tracer = new Tracer.Builder("FsShell").
+        conf(TraceUtils.wrapHadoopConf(SHELL_HTRACE_PREFIX, getConf())).
+        build();
     int exitCode = -1;
     if (argv.length < 1) {
       printUsage(System.err);

+ 130 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobalStorageStatistics.java

@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.NoSuchElementException;
+import java.util.TreeMap;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Stores global storage statistics objects.
+ */
+@InterfaceAudience.Public
+public enum GlobalStorageStatistics {
+  /**
+   * The GlobalStorageStatistics singleton.
+   */
+  INSTANCE;
+
+  /**
+   * A map of all global StorageStatistics objects, indexed by name.
+   */
+  private final NavigableMap<String, StorageStatistics> map = new TreeMap<>();
+
+  /**
+   * A callback API for creating new StorageStatistics instances.
+   */
+  public interface StorageStatisticsProvider {
+    StorageStatistics provide();
+  }
+
+  /**
+   * Get the StorageStatistics object with the given name.
+   *
+   * @param name        The storage statistics object name.
+   * @return            The StorageStatistics object with the given name, or
+   *                      null if there is none.
+   */
+  public synchronized StorageStatistics get(String name) {
+    return name == null ? null : map.get(name);
+  }
+
+  /**
+   * Create or return the StorageStatistics object with the given name.
+   *
+   * @param name        The storage statistics object name.
+   * @param provider    An object which can create a new StorageStatistics
+   *                      object if needed.
+   * @return            The StorageStatistics object with the given name.
+   * @throws RuntimeException  If the StorageStatisticsProvider provides a new
+   *                           StorageStatistics object with the wrong name.
+   */
+  public synchronized StorageStatistics put(String name,
+      StorageStatisticsProvider provider) {
+    Preconditions.checkNotNull(name,
+        "Storage statistics can not have a null name!");
+    StorageStatistics stats = map.get(name);
+    if (stats != null) {
+      return stats;
+    }
+    stats = provider.provide();
+    if (!stats.getName().equals(name)) {
+      throw new RuntimeException("StorageStatisticsProvider for " + name +
+          " provided a StorageStatistics object for " + stats.getName() +
+          " instead.");
+    }
+    map.put(name, stats);
+    return stats;
+  }
+
+  /**
+   * Get an iterator that we can use to iterate through all the global storage
+   * statistics objects.
+   */
+  synchronized public Iterator<StorageStatistics> iterator() {
+    Entry<String, StorageStatistics> first = map.firstEntry();
+    return new StorageIterator((first == null) ? null : first.getValue());
+  }
+
+  private class StorageIterator implements Iterator<StorageStatistics> {
+    private StorageStatistics next = null;
+
+    StorageIterator(StorageStatistics first) {
+      this.next = first;
+    }
+
+    @Override
+    public boolean hasNext() {
+      return (next != null);
+    }
+
+    @Override
+    public StorageStatistics next() {
+      if (next == null) {
+        throw new NoSuchElementException();
+      }
+      synchronized (GlobalStorageStatistics.this) {
+        StorageStatistics cur = next;
+        Entry<String, StorageStatistics> nextEntry =
+            map.higherEntry(cur.getName());
+        next = (nextEntry == null) ? null : nextEntry.getValue();
+        return cur;
+      }
+    }
+
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException();
+    }
+  }
+}
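
A sketch of the create-or-return contract of put(), assuming a caller-chosen name ("example-stats") and a trivial anonymous StorageStatistics; the registration site introduced by this change is FileSystem#getStatistics shown earlier:

import java.util.Collections;
import java.util.Iterator;
import org.apache.hadoop.fs.GlobalStorageStatistics;
import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
import org.apache.hadoop.fs.StorageStatistics;

public class GlobalRegistrySketch {
  public static void main(String[] args) {
    // put() only calls the provider when the name is not registered yet, and
    // throws if the provided object does not carry the requested name.
    StorageStatistics stats = GlobalStorageStatistics.INSTANCE.put(
        "example-stats",                               // hypothetical name
        new StorageStatisticsProvider() {
          @Override
          public StorageStatistics provide() {
            return new StorageStatistics("example-stats") {
              @Override
              public Iterator<LongStatistic> getLongStatistics() {
                return Collections.emptyIterator();
              }
              @Override
              public Long getLong(String key) {
                return null;
              }
              @Override
              public boolean isTracked(String key) {
                return false;
              }
            };
          }
        });
    System.out.println("registered: " + stats.getName());
  }
}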

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java

@@ -296,7 +296,7 @@ public class QuotaUsage {
       quotaStr = formatSize(quota, hOption);
       quotaRem = formatSize(quota-fileAndDirectoryCount, hOption);
     }
-    if (spaceQuota > 0) {
+    if (spaceQuota >= 0) {
       spaceQuotaStr = formatSize(spaceQuota, hOption);
       spaceQuotaRem = formatSize(spaceQuota - spaceConsumed, hOption);
     }
@@ -314,7 +314,7 @@ public class QuotaUsage {
       String quotaStr = QUOTA_NONE;
       String quotaRem = QUOTA_INF;
 
-      if (typeQuota > 0) {
+      if (typeQuota >= 0) {
         quotaStr = formatSize(typeQuota, hOption);
         quotaRem = formatSize(typeQuota - typeConsumed, hOption);
       }

+ 93 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java

@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.util.Iterator;
+
+/**
+ * StorageStatistics contains statistics data for a FileSystem or FileContext
+ * instance.
+ */
+@InterfaceAudience.Public
+public abstract class StorageStatistics {
+  /**
+   * A 64-bit storage statistic.
+   */
+  public static class LongStatistic {
+    private final String name;
+    private final long value;
+
+    public LongStatistic(String name, long value) {
+      this.name = name;
+      this.value = value;
+    }
+
+    /**
+     * @return    The name of this statistic.
+     */
+    public String getName() {
+      return name;
+    }
+
+    /**
+     * @return    The value of this statistic.
+     */
+    public long getValue() {
+      return value;
+    }
+  }
+
+  private final String name;
+
+  public StorageStatistics(String name) {
+    this.name = name;
+  }
+
+  /**
+   * Get the name of this StorageStatistics object.
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * Get an iterator over all the currently tracked long statistics.
+   *
+   * The values returned will depend on the type of FileSystem or FileContext
+   * object.  The values do not necessarily reflect a snapshot in time.
+   */
+  public abstract Iterator<LongStatistic> getLongStatistics();
+
+  /**
+   * Get the value of a statistic.
+   *
+   * @return         null if the statistic is not being tracked or is not a
+   *                     long statistic.
+   *                 The value of the statistic, otherwise.
+   */
+  public abstract Long getLong(String key);
+
+  /**
+   * Return true if a statistic is being tracked.
+   *
+   * @return         True only if the statistic is being tracked.
+   */
+  public abstract boolean isTracked(String key);
+}

+ 0 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java

@@ -56,7 +56,6 @@ public class TrashPolicyDefault extends TrashPolicy {
     LogFactory.getLog(TrashPolicyDefault.class);
 
   private static final Path CURRENT = new Path("Current");
-  private static final Path TRASH = new Path(".Trash/");  
 
   private static final FsPermission PERMISSION =
     new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);

+ 113 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnionStorageStatistics.java

@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A StorageStatistics instance which combines the outputs of several other
+ * StorageStatistics instances.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class UnionStorageStatistics extends StorageStatistics {
+  /**
+   * The underlying StorageStatistics.
+   */
+  private final StorageStatistics[] stats;
+
+  private class LongStatisticIterator implements Iterator<LongStatistic> {
+    private int statIdx;
+
+    private Iterator<LongStatistic> cur;
+
+    LongStatisticIterator() {
+      this.statIdx = 0;
+      this.cur = null;
+    }
+
+    @Override
+    public boolean hasNext() {
+      return (getIter() != null);
+    }
+
+    private Iterator<LongStatistic> getIter() {
+      while ((cur == null) || (!cur.hasNext())) {
+        if (statIdx >= stats.length) {
+          return null;
+        }
+        cur = stats[statIdx++].getLongStatistics();
+      }
+      return cur;
+    }
+
+    @Override
+    public LongStatistic next() {
+      Iterator<LongStatistic> iter = getIter();
+      if (iter == null) {
+        throw new NoSuchElementException();
+      }
+      return iter.next();
+    }
+
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException();
+    }
+  }
+
+  public UnionStorageStatistics(String name, StorageStatistics[] stats) {
+    super(name);
+    this.stats = stats;
+  }
+
+  @Override
+  public Iterator<LongStatistic> getLongStatistics() {
+    return new LongStatisticIterator();
+  }
+
+  @Override
+  public Long getLong(String key) {
+    for (int i = 0; i < stats.length; i++) {
+      Long val = stats[i].getLong(key);
+      if (val != null) {
+        return val;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Return true if a statistic is being tracked.
+   *
+   * @return         True only if the statistic is being tracked.
+   */
+  @Override
+  public boolean isTracked(String key) {
+    for (int i = 0; i < stats.length; i++) {
+      if (stats[i].isTracked(key)) {
+        return true;
+      }
+    }
+    return false;
+  }
+}
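
A minimal sketch of combining the statistics of two FileSystem instances; the union name is arbitrary, and at this point in the change both base FileSystems still return the empty per-instance implementation, so the output simply demonstrates the union semantics:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.fs.UnionStorageStatistics;

public class UnionStatsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem localFs = FileSystem.getLocal(conf);
    FileSystem defaultFs = FileSystem.get(conf);

    StorageStatistics union = new UnionStorageStatistics(
        "local+default",                        // hypothetical union name
        new StorageStatistics[] {
            localFs.getStorageStatistics(),
            defaultFs.getStorageStatistics()});

    // Tracked if any member tracks it; the value comes from the first member
    // that reports a non-null value for the key.
    System.out.println("bytesRead tracked: " + union.isTracked("bytesRead"));
    System.out.println("bytesRead = " + union.getLong("bytesRead"));
  }
}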

+ 74 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputBuffer.java

@@ -18,13 +18,14 @@
 
 package org.apache.hadoop.io;
 
-import java.io.*;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
-/** A reusable {@link DataInput} implementation that reads from an in-memory
- * buffer.
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+
+/** A reusable {@link java.io.DataInput} implementation
+ * that reads from an in-memory buffer.
  *
  * <p>This saves memory over creating a new DataInputStream and
  * ByteArrayInputStream each time data is read.
@@ -56,9 +57,75 @@ public class DataInputBuffer extends DataInputStream {
       this.pos = start;
     }
 
-    public byte[] getData() { return buf; }
-    public int getPosition() { return pos; }
-    public int getLength() { return count; }
+    public byte[] getData() {
+      return buf;
+    }
+
+    public int getPosition() {
+      return pos;
+    }
+
+    public int getLength() {
+      return count;
+    }
+
+    /* The functions below come verbatim from
+     hive.common.io.NonSyncByteArrayInputStream. */
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public int read() {
+      return (pos < count) ? (buf[pos++] & 0xff) : -1;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public int read(byte[] b, int off, int len) {
+      if (b == null) {
+        throw new NullPointerException();
+      } else if (off < 0 || len < 0 || len > b.length - off) {
+        throw new IndexOutOfBoundsException();
+      }
+      if (pos >= count) {
+        return -1;
+      }
+      if (pos + len > count) {
+        len = count - pos;
+      }
+      if (len <= 0) {
+        return 0;
+      }
+      System.arraycopy(buf, pos, b, off, len);
+      pos += len;
+      return len;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public long skip(long n) {
+      if (pos + n > count) {
+        n = count - pos;
+      }
+      if (n < 0) {
+        return 0;
+      }
+      pos += n;
+      return n;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public int available() {
+      return count - pos;
+    }
   }
 
   private Buffer buffer;
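
The Buffer methods added above surface through DataInputBuffer's DataInputStream API; a small sketch of the reusable-buffer pattern the class exists for:

import org.apache.hadoop.io.DataInputBuffer;

public class DataInputBufferSketch {
  public static void main(String[] args) throws Exception {
    byte[] first = {1, 2, 3, 4, 5};
    byte[] second = {9, 8, 7};

    DataInputBuffer in = new DataInputBuffer();

    // reset() re-points the same buffer at new data with no new allocations.
    in.reset(first, 0, first.length);
    System.out.println("read: " + in.readByte());   // served by Buffer.read()
    System.out.println("skip: " + in.skip(2));      // served by Buffer.skip()
    System.out.println("pos : " + in.getPosition());

    in.reset(second, 0, second.length);
    System.out.println("read: " + in.readByte());
  }
}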

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java

@@ -234,7 +234,7 @@ public class IOUtils {
   }
   
   /**
-   * Close the Closeable objects and <b>ignore</b> any {@link IOException} or 
+   * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
    * null pointers. Must only be used for cleanup in exception handlers.
    *
    * @param log the log to record problems to at debug level. Can be null.
@@ -255,7 +255,7 @@ public class IOUtils {
   }
 
   /**
-   * Closes the stream ignoring {@link IOException}.
+   * Closes the stream ignoring {@link Throwable}.
    * Must only be called in cleaning up from exception handlers.
    *
    * @param stream the Stream to close

+ 5 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -1115,9 +1115,12 @@ public class SequenceFile {
             CompressionOption readerCompressionOption = new CompressionOption(
                 reader.getCompressionType(), reader.getCompressionCodec());
 
+            // Codec comparison will be ignored if the compression is NONE
             if (readerCompressionOption.value != compressionTypeOption.value
-                || !readerCompressionOption.codec.getClass().getName()
-                    .equals(compressionTypeOption.codec.getClass().getName())) {
+                || (readerCompressionOption.value != CompressionType.NONE
+                    && readerCompressionOption.codec
+                        .getClass() != compressionTypeOption.codec
+                            .getClass())) {
               throw new IllegalArgumentException(
                   "Compression option provided does not match the file");
             }

+ 30 - 11
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java

@@ -22,22 +22,36 @@ import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.compress.Decompressor;
 
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class DecompressorStream extends CompressionInputStream {
+  /**
+   * The maximum input buffer size.
+   */
+  private static final int MAX_INPUT_BUFFER_SIZE = 512;
+  /**
+   * MAX_SKIP_BUFFER_SIZE is used to determine the maximum buffer size to
+   * use when skipping. See {@link java.io.InputStream}.
+   */
+  private static final int MAX_SKIP_BUFFER_SIZE = 2048;
+
+  private byte[] skipBytes;
+  private byte[] oneByte = new byte[1];
+
   protected Decompressor decompressor = null;
   protected byte[] buffer;
   protected boolean eof = false;
   protected boolean closed = false;
   private int lastBytesSent = 0;
 
-  public DecompressorStream(InputStream in, Decompressor decompressor,
-                            int bufferSize)
-  throws IOException {
+  @VisibleForTesting
+  DecompressorStream(InputStream in, Decompressor decompressor,
+                            int bufferSize, int skipBufferSize)
+      throws IOException {
     super(in);
 
     if (decompressor == null) {
@@ -48,11 +62,18 @@ public class DecompressorStream extends CompressionInputStream {
 
     this.decompressor = decompressor;
     buffer = new byte[bufferSize];
+    skipBytes = new byte[skipBufferSize];
+  }
+
+  public DecompressorStream(InputStream in, Decompressor decompressor,
+                            int bufferSize)
+      throws IOException {
+    this(in, decompressor, bufferSize, MAX_SKIP_BUFFER_SIZE);
   }
 
   public DecompressorStream(InputStream in, Decompressor decompressor)
-  throws IOException {
-    this(in, decompressor, 512);
+      throws IOException {
+    this(in, decompressor, MAX_INPUT_BUFFER_SIZE);
   }
 
   /**
@@ -64,8 +85,7 @@ public class DecompressorStream extends CompressionInputStream {
   protected DecompressorStream(InputStream in) throws IOException {
     super(in);
   }
-  
-  private byte[] oneByte = new byte[1];
+
   @Override
   public int read() throws IOException {
     checkStream();
@@ -86,7 +106,7 @@ public class DecompressorStream extends CompressionInputStream {
   }
 
   protected int decompress(byte[] b, int off, int len) throws IOException {
-    int n = 0;
+    int n;
 
     while ((n = decompressor.decompress(b, off, len)) == 0) {
       if (decompressor.needsDictionary()) {
@@ -170,7 +190,6 @@ public class DecompressorStream extends CompressionInputStream {
     decompressor.reset();
   }
 
-  private byte[] skipBytes = new byte[512];
   @Override
   public long skip(long n) throws IOException {
     // Sanity checks
@@ -178,7 +197,7 @@ public class DecompressorStream extends CompressionInputStream {
       throw new IllegalArgumentException("negative skip length");
     }
     checkStream();
-    
+
     // Read 'n' bytes
     int skipped = 0;
     while (skipped < n) {

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/FailoverProxyProvider.java

@@ -37,10 +37,21 @@ public interface FailoverProxyProvider<T> extends Closeable {
      * provides information for debugging purposes.
      */
     public final String proxyInfo;
+
     public ProxyInfo(T proxy, String proxyInfo) {
       this.proxy = proxy;
       this.proxyInfo = proxyInfo;
     }
+
+    public String getString(String methodName) {
+      return proxy.getClass().getSimpleName() + "." + methodName
+          + " over " + proxyInfo;
+    }
+
+    @Override
+    public String toString() {
+      return proxy.getClass().getSimpleName() + " over " + proxyInfo;
+    }
   }
 
   /**

+ 180 - 170
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java

@@ -17,48 +17,137 @@
  */
 package org.apache.hadoop.io.retry;
 
-import java.io.IOException;
-import java.lang.reflect.InvocationHandler;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.lang.reflect.Proxy;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
-import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.*;
 import org.apache.hadoop.ipc.Client.ConnectionId;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RpcConstants;
-import org.apache.hadoop.ipc.RpcInvocationHandler;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.io.IOException;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Map;
 
 /**
- * This class implements RpcInvocationHandler and supports retry on the client 
- * side.
+ * A {@link RpcInvocationHandler} which supports client-side retry.
  */
 @InterfaceAudience.Private
 public class RetryInvocationHandler<T> implements RpcInvocationHandler {
   public static final Log LOG = LogFactory.getLog(RetryInvocationHandler.class);
-  private final FailoverProxyProvider<T> proxyProvider;
 
-  /**
-   * The number of times the associated proxyProvider has ever been failed over.
-   */
-  private long proxyProviderFailoverCount = 0;
+  private static class Counters {
+    /** Counter for retries. */
+    private int retries;
+    /** Counter for the number of times the method invocation has failed over. */
+    private int failovers;
+  }
+
+  private static class ProxyDescriptor<T> {
+    private final FailoverProxyProvider<T> fpp;
+    /** The number of times the associated proxy provider has failed over. */
+    private long failoverCount = 0;
+
+    private ProxyInfo<T> proxyInfo;
+
+    ProxyDescriptor(FailoverProxyProvider<T> fpp) {
+      this.fpp = fpp;
+      this.proxyInfo = fpp.getProxy();
+    }
+
+    synchronized ProxyInfo<T> getProxyInfo() {
+      return proxyInfo;
+    }
+
+    synchronized T getProxy() {
+      return proxyInfo.proxy;
+    }
+
+    synchronized long getFailoverCount() {
+      return failoverCount;
+    }
+
+    synchronized void failover(long expectedFailoverCount, Method method) {
+      // Make sure that concurrent failed invocations only cause a single
+      // actual failover.
+      if (failoverCount == expectedFailoverCount) {
+        fpp.performFailover(proxyInfo.proxy);
+        failoverCount++;
+      } else {
+        LOG.warn("A failover has occurred since the start of "
+            + proxyInfo.getString(method.getName()));
+      }
+      proxyInfo = fpp.getProxy();
+    }
+
+    boolean idempotentOrAtMostOnce(Method method) throws NoSuchMethodException {
+      final Method m = fpp.getInterface()
+          .getMethod(method.getName(), method.getParameterTypes());
+      return m.isAnnotationPresent(Idempotent.class)
+          || m.isAnnotationPresent(AtMostOnce.class);
+    }
+
+    void close() throws IOException {
+      fpp.close();
+    }
+  }
+
+  private static class RetryInfo {
+    private final long delay;
+    private final RetryAction failover;
+    private final RetryAction fail;
+
+    RetryInfo(long delay, RetryAction failover, RetryAction fail) {
+      this.delay = delay;
+      this.failover = failover;
+      this.fail = fail;
+    }
+
+    static RetryInfo newRetryInfo(RetryPolicy policy, Exception e,
+        Counters counters, boolean idempotentOrAtMostOnce) throws Exception {
+      long maxRetryDelay = 0;
+      RetryAction failover = null;
+      RetryAction retry = null;
+      RetryAction fail = null;
+
+      final Iterable<Exception> exceptions = e instanceof MultiException ?
+          ((MultiException) e).getExceptions().values()
+          : Collections.singletonList(e);
+      for (Exception exception : exceptions) {
+        final RetryAction a = policy.shouldRetry(exception,
+            counters.retries, counters.failovers, idempotentOrAtMostOnce);
+        if (a.action == RetryAction.RetryDecision.FAIL) {
+          fail = a;
+        } else {
+          // must be a retry or failover
+          if (a.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY) {
+            failover = a;
+          } else {
+            retry = a;
+          }
+          if (a.delayMillis > maxRetryDelay) {
+            maxRetryDelay = a.delayMillis;
+          }
+        }
+      }
+
+      return new RetryInfo(maxRetryDelay, failover,
+          failover == null && retry == null? fail: null);
+    }
+  }
+
+  private final ProxyDescriptor<T> proxyDescriptor;
+
   private volatile boolean hasMadeASuccessfulCall = false;
   
   private final RetryPolicy defaultPolicy;
   private final Map<String,RetryPolicy> methodNameToPolicyMap;
-  private ProxyInfo<T> currentProxy;
 
   protected RetryInvocationHandler(FailoverProxyProvider<T> proxyProvider,
       RetryPolicy retryPolicy) {
@@ -68,39 +157,40 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
   protected RetryInvocationHandler(FailoverProxyProvider<T> proxyProvider,
       RetryPolicy defaultPolicy,
       Map<String, RetryPolicy> methodNameToPolicyMap) {
-    this.proxyProvider = proxyProvider;
+    this.proxyDescriptor = new ProxyDescriptor<>(proxyProvider);
     this.defaultPolicy = defaultPolicy;
     this.methodNameToPolicyMap = methodNameToPolicyMap;
-    this.currentProxy = proxyProvider.getProxy();
+  }
+
+  private RetryPolicy getRetryPolicy(Method method) {
+    final RetryPolicy policy = methodNameToPolicyMap.get(method.getName());
+    return policy != null? policy: defaultPolicy;
   }
 
   @Override
   public Object invoke(Object proxy, Method method, Object[] args)
-    throws Throwable {
-    RetryPolicy policy = methodNameToPolicyMap.get(method.getName());
-    if (policy == null) {
-      policy = defaultPolicy;
-    }
-    
-    // The number of times this method invocation has been failed over.
-    int invocationFailoverCount = 0;
-    final boolean isRpc = isRpcInvocation(currentProxy.proxy);
+      throws Throwable {
+    final boolean isRpc = isRpcInvocation(proxyDescriptor.getProxy());
     final int callId = isRpc? Client.nextCallId(): RpcConstants.INVALID_CALL_ID;
-    int retries = 0;
+    return invoke(method, args, isRpc, callId, new Counters());
+  }
+
+  private Object invoke(final Method method, final Object[] args,
+      final boolean isRpc, final int callId, final Counters counters)
+      throws Throwable {
+    final RetryPolicy policy = getRetryPolicy(method);
+
     while (true) {
       // The number of times this invocation handler has ever been failed over,
       // before this method invocation attempt. Used to prevent concurrent
       // failed method invocations from triggering multiple failover attempts.
-      long invocationAttemptFailoverCount;
-      synchronized (proxyProvider) {
-        invocationAttemptFailoverCount = proxyProviderFailoverCount;
-      }
+      final long failoverCount = proxyDescriptor.getFailoverCount();
 
       if (isRpc) {
-        Client.setCallIdAndRetryCount(callId, retries);
+        Client.setCallIdAndRetryCount(callId, counters.retries);
       }
       try {
-        Object ret = invokeMethod(method, args);
+        final Object ret = invokeMethod(method, args);
         hasMadeASuccessfulCall = true;
         return ret;
       } catch (Exception ex) {
@@ -108,153 +198,74 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
           // If interrupted, do not retry.
           throw ex;
         }
-        boolean isIdempotentOrAtMostOnce = proxyProvider.getInterface()
-            .getMethod(method.getName(), method.getParameterTypes())
-            .isAnnotationPresent(Idempotent.class);
-        if (!isIdempotentOrAtMostOnce) {
-          isIdempotentOrAtMostOnce = proxyProvider.getInterface()
-              .getMethod(method.getName(), method.getParameterTypes())
-              .isAnnotationPresent(AtMostOnce.class);
-        }
-        List<RetryAction> actions = extractActions(policy, ex, retries++,
-                invocationFailoverCount, isIdempotentOrAtMostOnce);
-        RetryAction failAction = getFailAction(actions);
-        if (failAction != null) {
-          // fail.
-          if (failAction.reason != null) {
-            LOG.warn("Exception while invoking " + currentProxy.proxy.getClass()
-                + "." + method.getName() + " over " + currentProxy.proxyInfo
-                + ". Not retrying because " + failAction.reason, ex);
-          }
-          throw ex;
-        } else { // retry or failover
-          // avoid logging the failover if this is the first call on this
-          // proxy object, and we successfully achieve the failover without
-          // any flip-flopping
-          boolean worthLogging = 
-            !(invocationFailoverCount == 0 && !hasMadeASuccessfulCall);
-          worthLogging |= LOG.isDebugEnabled();
-          RetryAction failOverAction = getFailOverAction(actions);
-          long delay = getDelayMillis(actions);
-
-          if (worthLogging) {
-            String msg = "Exception while invoking " + method.getName()
-                + " of class " + currentProxy.proxy.getClass().getSimpleName()
-                + " over " + currentProxy.proxyInfo;
-
-            if (invocationFailoverCount > 0) {
-              msg += " after " + invocationFailoverCount + " fail over attempts"; 
-            }
-
-            if (failOverAction != null) {
-              // failover
-              msg += ". Trying to fail over " + formatSleepMessage(delay);
-            } else {
-              // retry
-              msg += ". Retrying " + formatSleepMessage(delay);
-            }
-            LOG.info(msg, ex);
-          }
-
-          if (delay > 0) {
-            Thread.sleep(delay);
-          }
-
-          if (failOverAction != null) {
-            // Make sure that concurrent failed method invocations only cause a
-            // single actual fail over.
-            synchronized (proxyProvider) {
-              if (invocationAttemptFailoverCount == proxyProviderFailoverCount) {
-                proxyProvider.performFailover(currentProxy.proxy);
-                proxyProviderFailoverCount++;
-              } else {
-                LOG.warn("A failover has occurred since the start of this method"
-                    + " invocation attempt.");
-              }
-              currentProxy = proxyProvider.getProxy();
-            }
-            invocationFailoverCount++;
-          }
-        }
+        handleException(method, policy, failoverCount, counters, ex);
       }
     }
   }
 
-  /**
-   * Obtain a retry delay from list of RetryActions.
-   */
-  private long getDelayMillis(List<RetryAction> actions) {
-    long retVal = 0;
-    for (RetryAction action : actions) {
-      if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY ||
-              action.action == RetryAction.RetryDecision.RETRY) {
-        if (action.delayMillis > retVal) {
-          retVal = action.delayMillis;
-        }
+  private void handleException(final Method method, final RetryPolicy policy,
+      final long expectedFailoverCount, final Counters counters,
+      final Exception ex) throws Exception {
+    final RetryInfo retryInfo = RetryInfo.newRetryInfo(policy, ex, counters,
+        proxyDescriptor.idempotentOrAtMostOnce(method));
+    counters.retries++;
+
+    if (retryInfo.fail != null) {
+      // fail.
+      if (retryInfo.fail.reason != null) {
+        LOG.warn("Exception while invoking "
+            + proxyDescriptor.getProxyInfo().getString(method.getName())
+            + ". Not retrying because " + retryInfo.fail.reason, ex);
       }
+      throw ex;
     }
-    return retVal;
-  }
 
-  /**
-   * Return the first FAILOVER_AND_RETRY action.
-   */
-  private RetryAction getFailOverAction(List<RetryAction> actions) {
-    for (RetryAction action : actions) {
-      if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY) {
-        return action;
-      }
+    // retry
+    final boolean isFailover = retryInfo.failover != null;
+
+    log(method, isFailover, counters.failovers, retryInfo.delay, ex);
+
+    if (retryInfo.delay > 0) {
+      Thread.sleep(retryInfo.delay);
     }
-    return null;
-  }
 
-  /**
-   * Return the last FAIL action.. only if there are no RETRY actions.
-   */
-  private RetryAction getFailAction(List<RetryAction> actions) {
-    RetryAction fAction = null;
-    for (RetryAction action : actions) {
-      if (action.action == RetryAction.RetryDecision.FAIL) {
-        fAction = action;
-      } else {
-        // Atleast 1 RETRY
-        return null;
-      }
+    if (isFailover) {
+      proxyDescriptor.failover(expectedFailoverCount, method);
+      counters.failovers++;
     }
-    return fAction;
   }
 
-  private List<RetryAction> extractActions(RetryPolicy policy, Exception ex,
-                                           int i, int invocationFailoverCount,
-                                           boolean isIdempotentOrAtMostOnce)
-          throws Exception {
-    List<RetryAction> actions = new LinkedList<>();
-    if (ex instanceof MultiException) {
-      for (Exception th : ((MultiException) ex).getExceptions().values()) {
-        actions.add(policy.shouldRetry(th, i, invocationFailoverCount,
-                isIdempotentOrAtMostOnce));
-      }
-    } else {
-      actions.add(policy.shouldRetry(ex, i,
-              invocationFailoverCount, isIdempotentOrAtMostOnce));
+  private void log(final Method method, final boolean isFailover,
+      final int failovers, final long delay, final Exception ex) {
+    // log info if this has made some successful calls or
+    // this is not the first failover
+    final boolean info = hasMadeASuccessfulCall || failovers != 0;
+    if (!info && !LOG.isDebugEnabled()) {
+      return;
     }
-    return actions;
-  }
 
-  private static String formatSleepMessage(long millis) {
-    if (millis > 0) {
-      return "after sleeping for " + millis + "ms.";
+    final StringBuilder b = new StringBuilder()
+        .append("Exception while invoking ")
+        .append(proxyDescriptor.getProxyInfo().getString(method.getName()));
+    if (failovers > 0) {
+      b.append(" after ").append(failovers).append(" failover attempts");
+    }
+    b.append(isFailover? ". Trying to failover ": ". Retrying ");
+    b.append(delay > 0? "after sleeping for " + delay + "ms.": "immediately.");
+
+    if (info) {
+      LOG.info(b.toString(), ex);
     } else {
-      return "immediately.";
+      LOG.debug(b.toString(), ex);
     }
   }
-  
+
   protected Object invokeMethod(Method method, Object[] args) throws Throwable {
     try {
       if (!method.isAccessible()) {
         method.setAccessible(true);
       }
-      return method.invoke(currentProxy.proxy, args);
+      return method.invoke(proxyDescriptor.getProxy(), args);
     } catch (InvocationTargetException e) {
       throw e.getCause();
     }
@@ -274,12 +285,11 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
 
   @Override
   public void close() throws IOException {
-    proxyProvider.close();
+    proxyDescriptor.close();
   }
 
   @Override //RpcInvocationHandler
   public ConnectionId getConnectionId() {
-    return RPC.getConnectionIdForProxy(currentProxy.proxy);
+    return RPC.getConnectionIdForProxy(proxyDescriptor.getProxy());
   }
-
 }

+ 8 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java

@@ -206,11 +206,6 @@ public class DecayRpcScheduler implements RpcScheduler,
     this.backOffResponseTimeThresholds =
         parseBackOffResponseTimeThreshold(ns, conf, numLevels);
 
-    // Setup delay timer
-    Timer timer = new Timer();
-    DecayTask task = new DecayTask(this, timer);
-    timer.scheduleAtFixedRate(task, decayPeriodMillis, decayPeriodMillis);
-
     // Setup response time metrics
     responseTimeTotalInCurrWindow = new AtomicLongArray(numLevels);
     responseTimeCountInCurrWindow = new AtomicLongArray(numLevels);
@@ -223,6 +218,11 @@ public class DecayRpcScheduler implements RpcScheduler,
     Preconditions.checkArgument(topUsersCount > 0,
         "the number of top users for scheduler metrics must be at least 1");
 
+    // Setup delay timer
+    Timer timer = new Timer();
+    DecayTask task = new DecayTask(this, timer);
+    timer.scheduleAtFixedRate(task, decayPeriodMillis, decayPeriodMillis);
+
     MetricsProxy prox = MetricsProxy.getInstance(ns, numLevels);
     prox.setDelegate(this);
     prox.registerMetrics2Source(ns);
@@ -821,9 +821,10 @@ public class DecayRpcScheduler implements RpcScheduler,
     final int topCallerCount = 10;
     TopN topNCallers = getTopCallers(topCallerCount);
     Map<Object, Integer> decisions = scheduleCacheRef.get();
-    for (int i=0; i < topNCallers.size(); i++) {
+    final int actualCallerCount = topNCallers.size();
+    for (int i = 0; i < actualCallerCount; i++) {
       NameValuePair entry =  topNCallers.poll();
-      String topCaller = "Top." + (topCallerCount - i) + "." +
+      String topCaller = "Top." + (actualCallerCount - i) + "." +
           "Caller(" + entry.getName() + ")";
       String topCallerVolume = topCaller + ".Volume";
       String topCallerPriority = topCaller + ".Priority";

+ 11 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java

@@ -142,27 +142,33 @@ public class LogLevel {
     private static void process(org.apache.log4j.Logger log, String level,
         PrintWriter out) throws IOException {
       if (level != null) {
-        if (!level.equals(org.apache.log4j.Level.toLevel(level).toString())) {
-          out.println(MARKER + "Bad level : <b>" + level + "</b><br />");
+        if (!level.equalsIgnoreCase(org.apache.log4j.Level.toLevel(level)
+            .toString())) {
+          out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
         } else {
           log.setLevel(org.apache.log4j.Level.toLevel(level));
           out.println(MARKER + "Setting Level to " + level + " ...<br />");
         }
       }
       out.println(MARKER
-          + "Effective level: <b>" + log.getEffectiveLevel() + "</b><br />");
+          + "Effective Level: <b>" + log.getEffectiveLevel() + "</b><br />");
     }
 
     private static void process(java.util.logging.Logger log, String level,
         PrintWriter out) throws IOException {
       if (level != null) {
-        log.setLevel(java.util.logging.Level.parse(level));
+        String levelToUpperCase = level.toUpperCase();
+        try {
+          log.setLevel(java.util.logging.Level.parse(levelToUpperCase));
+        } catch (IllegalArgumentException e) {
+          out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
+        }
         out.println(MARKER + "Setting Level to " + level + " ...<br />");
       }
 
       java.util.logging.Level lev;
       for(; (lev = log.getLevel()) == null; log = log.getParent());
-      out.println(MARKER + "Effective level: <b>" + lev + "</b><br />");
+      out.println(MARKER + "Effective Level: <b>" + lev + "</b><br />");
     }
   }
 }

+ 141 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricStringBuilder.java

@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Build a string dump of the metrics.
+ *
+ * The {@link #toString()} method dumps out all values collected.
+ *
+ * Every entry is formatted as
+ * {@code prefix + name + separator + value + suffix}
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class MetricStringBuilder extends MetricsRecordBuilder {
+
+  private final StringBuilder builder = new StringBuilder(256);
+
+  private final String prefix;
+  private final String suffix;
+  private final String separator;
+  private final MetricsCollector parent;
+
+  /**
+   * Build an instance.
+   * @param parent parent collector. Unused in this instance; only used for
+   * the {@link #parent()} method
+   * @param prefix string before each entry
+   * @param separator separator between name and value
+   * @param suffix suffix after each entry
+   */
+  public MetricStringBuilder(MetricsCollector parent,
+      String prefix,
+      String separator,
+      String suffix) {
+    this.parent = parent;
+    this.prefix = prefix;
+    this.suffix = suffix;
+    this.separator = separator;
+  }
+
+  public MetricStringBuilder add(MetricsInfo info, Object value) {
+    return tuple(info.name(), value.toString());
+  }
+
+  /**
+   * Add any key,val pair to the string, between the prefix and suffix,
+   * separated by the separator.
+   * @param key key
+   * @param value value
+   * @return this instance
+   */
+  public MetricStringBuilder tuple(String key, String value) {
+    builder.append(prefix)
+        .append(key)
+        .append(separator)
+        .append(value)
+        .append(suffix);
+    return this;
+  }
+
+  @Override
+  public MetricsRecordBuilder tag(MetricsInfo info, String value) {
+    return add(info, value);
+  }
+
+  @Override
+  public MetricsRecordBuilder add(MetricsTag tag) {
+    return tuple(tag.name(), tag.value());
+  }
+
+  @Override
+  public MetricsRecordBuilder add(AbstractMetric metric) {
+    add(metric.info(), metric.toString());
+    return this;
+  }
+
+  @Override
+  public MetricsRecordBuilder setContext(String value) {
+    return tuple("context", value);
+  }
+
+  @Override
+  public MetricsRecordBuilder addCounter(MetricsInfo info, int value) {
+    return add(info, value);
+  }
+
+  @Override
+  public MetricsRecordBuilder addCounter(MetricsInfo info, long value) {
+    return add(info, value);
+  }
+
+  @Override
+  public MetricsRecordBuilder addGauge(MetricsInfo info, int value) {
+    return add(info, value);
+  }
+
+  @Override
+  public MetricsRecordBuilder addGauge(MetricsInfo info, long value) {
+    return add(info, value);
+  }
+
+  @Override
+  public MetricsRecordBuilder addGauge(MetricsInfo info, float value) {
+    return add(info, value);
+  }
+
+  @Override
+  public MetricsRecordBuilder addGauge(MetricsInfo info, double value) {
+    return add(info, value);
+  }
+
+  @Override
+  public MetricsCollector parent() {
+    return parent;
+  }
+
+  @Override
+  public String toString() {
+    return builder.toString();
+  }
+}
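A minimal usage sketch of the new builder, not part of the patch (the metric names are illustrative; passing a null parent is allowed here because it is only returned by parent()):

import org.apache.hadoop.metrics2.MetricStringBuilder;
import org.apache.hadoop.metrics2.lib.Interns;

public class MetricStringBuilderDemo {
  public static void main(String[] args) {
    // Every entry is rendered as prefix + name + separator + value + suffix.
    MetricStringBuilder sb = new MetricStringBuilder(null, "(", "=", "); ");
    sb.tuple("bytesRead", "1024");
    sb.addGauge(Interns.info("queueLength", "queued operations"), 3);
    System.out.println(sb);   // (bytesRead=1024); (queueLength=3);
  }
}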

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterLong.java

@@ -34,7 +34,7 @@ public class MutableCounterLong extends MutableCounter {
 
   private AtomicLong value = new AtomicLong();
 
-  MutableCounterLong(MetricsInfo info, long initValue) {
+  public MutableCounterLong(MetricsInfo info, long initValue) {
     super(info);
     this.value.set(initValue);
   }
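Making the constructor public lets instrumentation outside a MetricsRegistry create counters directly; a small sketch under that assumption (the metric name is illustrative):

import org.apache.hadoop.metrics2.lib.Interns;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

public class CounterDemo {
  public static void main(String[] args) {
    MutableCounterLong bytesWritten =
        new MutableCounterLong(Interns.info("bytesWritten", "bytes written"), 0L);
    bytesWritten.incr(4096L);                 // increment by an arbitrary delta
    System.out.println(bytesWritten.value()); // 4096
  }
}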

+ 111 - 33
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java

@@ -29,13 +29,13 @@ import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -54,8 +54,8 @@ import com.google.common.collect.Lists;
 public class NetworkTopology {
   public final static String DEFAULT_RACK = "/default-rack";
   public final static int DEFAULT_HOST_LEVEL = 2;
-  public static final Log LOG =
-    LogFactory.getLog(NetworkTopology.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(NetworkTopology.class);
 
   public static class InvalidTopologyException extends RuntimeException {
     private static final long serialVersionUID = 1L;
@@ -442,9 +442,7 @@ public class NetworkTopology {
           }
         }
       }
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("NetworkTopology became:\n" + this.toString());
-      }
+      LOG.debug("NetworkTopology became:\n{}", this.toString());
     } finally {
       netlock.writeLock().unlock();
     }
@@ -517,9 +515,7 @@ public class NetworkTopology {
           numOfRacks--;
         }
       }
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("NetworkTopology became:\n" + this.toString());
-      }
+      LOG.debug("NetworkTopology became:\n{}", this.toString());
     } finally {
       netlock.writeLock().unlock();
     }
@@ -659,6 +655,41 @@ public class NetworkTopology {
     return dis+2;
   }
 
+  /** Return the distance between two nodes by comparing their network paths
+   * without checking if they belong to the same ancestor node by reference.
+   * It is assumed that the distance from one node to its parent is 1.
+   * The distance between two nodes is calculated by summing up their distances
+   * to their closest common ancestor.
+   * @param node1 one node
+   * @param node2 another node
+   * @return the distance between node1 and node2
+   */
+  static public int getDistanceByPath(Node node1, Node node2) {
+    if (node1 == null && node2 == null) {
+      return 0;
+    }
+    if (node1 == null || node2 == null) {
+      LOG.warn("One of the nodes is a null pointer");
+      return Integer.MAX_VALUE;
+    }
+    String[] paths1 = NodeBase.getPathComponents(node1);
+    String[] paths2 = NodeBase.getPathComponents(node2);
+    int dis = 0;
+    int index = 0;
+    int minLevel = Math.min(paths1.length, paths2.length);
+    while (index < minLevel) {
+      if (!paths1[index].equals(paths2[index])) {
+        // Once the paths start to diverge, compute the distance that includes
+        // the rest of the paths.
+        dis += 2 * (minLevel - index);
+        break;
+      }
+      index++;
+    }
+    dis += Math.abs(paths1.length - paths2.length);
+    return dis;
+  }
+
   /** Check if two nodes are on the same rack
    * @param node1 one node (can be null)
    * @param node2 another node (can be null)
@@ -717,26 +748,45 @@ public class NetworkTopology {
     r.setSeed(seed);
   }
 
-  /** randomly choose one node from <i>scope</i>
-   * if scope starts with ~, choose one from the all nodes except for the
-   * ones in <i>scope</i>; otherwise, choose one from <i>scope</i>
+  /**
+   * Randomly choose a node.
+   *
    * @param scope range of nodes from which a node will be chosen
    * @return the chosen node
+   *
+   * @see #chooseRandom(String, Collection)
+   */
+  public Node chooseRandom(final String scope) {
+    return chooseRandom(scope, null);
+  }
+
+  /**
+   * Randomly choose one node from <i>scope</i>.
+   *
+   * If scope starts with ~, choose one from all nodes except for the
+   * ones in <i>scope</i>; otherwise, choose one from <i>scope</i>.
+   * If excludedNodes is given, choose a node that's not in excludedNodes.
+   *
+   * @param scope range of nodes from which a node will be chosen
+   * @param excludedNodes nodes to be excluded from the selection
+   * @return the chosen node
    */
-  public Node chooseRandom(String scope) {
+  public Node chooseRandom(final String scope,
+      final Collection<Node> excludedNodes) {
     netlock.readLock().lock();
     try {
       if (scope.startsWith("~")) {
-        return chooseRandom(NodeBase.ROOT, scope.substring(1));
+        return chooseRandom(NodeBase.ROOT, scope.substring(1), excludedNodes);
       } else {
-        return chooseRandom(scope, null);
+        return chooseRandom(scope, null, excludedNodes);
       }
     } finally {
       netlock.readLock().unlock();
     }
   }
 
-  private Node chooseRandom(String scope, String excludedScope){
+  private Node chooseRandom(final String scope, String excludedScope,
+      final Collection<Node> excludedNodes) {
     if (excludedScope != null) {
       if (scope.startsWith(excludedScope)) {
         return null;
@@ -747,7 +797,8 @@ public class NetworkTopology {
     }
     Node node = getNode(scope);
     if (!(node instanceof InnerNode)) {
-      return node;
+      return excludedNodes != null && excludedNodes.contains(node) ?
+          null : node;
     }
     InnerNode innerNode = (InnerNode)node;
     int numOfDatanodes = innerNode.getNumOfLeaves();
@@ -762,12 +813,36 @@ public class NetworkTopology {
       }
     }
     if (numOfDatanodes == 0) {
-      throw new InvalidTopologyException(
-          "Failed to find datanode (scope=\"" + String.valueOf(scope) +
-          "\" excludedScope=\"" + String.valueOf(excludedScope) + "\").");
+      LOG.warn("Failed to find datanode (scope=\"{}\" excludedScope=\"{}\").",
+          String.valueOf(scope), String.valueOf(excludedScope));
+      return null;
+    }
+    Node ret = null;
+    final int availableNodes;
+    if (excludedScope == null) {
+      availableNodes = countNumOfAvailableNodes(scope, excludedNodes);
+    } else {
+      availableNodes =
+          countNumOfAvailableNodes("~" + excludedScope, excludedNodes);
+    }
+    LOG.debug("Choosing random from {} available nodes on node {},"
+        + " scope={}, excludedScope={}, excludeNodes={}", availableNodes,
+        innerNode.toString(), scope, excludedScope, excludedNodes);
+    if (availableNodes > 0) {
+      do {
+        int leaveIndex = r.nextInt(numOfDatanodes);
+        ret = innerNode.getLeaf(leaveIndex, node);
+        if (excludedNodes == null || !excludedNodes.contains(ret)) {
+          break;
+        } else {
+          LOG.debug("Node {} is excluded, continuing.", ret);
+        }
+        // We've counted numOfAvailableNodes inside the lock, so there must be
+        // at least 1 satisfying node. Keep trying until we find it.
+      } while (true);
     }
-    int leaveIndex = r.nextInt(numOfDatanodes);
-    return innerNode.getLeaf(leaveIndex, node);
+    LOG.debug("chooseRandom returning {}", ret);
+    return ret;
   }
 
   /** return leaves in <i>scope</i>
@@ -795,6 +870,7 @@ public class NetworkTopology {
    * @param excludedNodes a list of nodes
    * @return number of available nodes
    */
+  @VisibleForTesting
   public int countNumOfAvailableNodes(String scope,
                                       Collection<Node> excludedNodes) {
     boolean isExcluded=false;
@@ -807,16 +883,18 @@ public class NetworkTopology {
     int excludedCountOffScope = 0; // the number of nodes outside scope & excludedNodes
     netlock.readLock().lock();
     try {
-      for (Node node : excludedNodes) {
-        node = getNode(NodeBase.getPath(node));
-        if (node == null) {
-          continue;
-        }
-        if ((NodeBase.getPath(node) + NodeBase.PATH_SEPARATOR_STR)
-            .startsWith(scope + NodeBase.PATH_SEPARATOR_STR)) {
-          excludedCountInScope++;
-        } else {
-          excludedCountOffScope++;
+      if (excludedNodes != null) {
+        for (Node node : excludedNodes) {
+          node = getNode(NodeBase.getPath(node));
+          if (node == null) {
+            continue;
+          }
+          if ((NodeBase.getPath(node) + NodeBase.PATH_SEPARATOR_STR)
+              .startsWith(scope + NodeBase.PATH_SEPARATOR_STR)) {
+            excludedCountInScope++;
+          } else {
+            excludedCountOffScope++;
+          }
         }
       }
       Node n = getNode(scope);
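A self-contained sketch of the path-based distance calculation introduced above; it builds standalone NodeBase instances rather than registering them in a topology, which is exactly the case getDistanceByPath is meant to handle (node names and rack paths are illustrative):

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;

public class DistanceDemo {
  public static void main(String[] args) {
    // Leaves under /d1/r1 and /d1/r2; no shared InnerNode tree is required.
    NodeBase h1 = new NodeBase("h1", "/d1/r1");
    NodeBase h2 = new NodeBase("h2", "/d1/r2");
    NodeBase h3 = new NodeBase("h3", "/d1/r1");
    System.out.println(NetworkTopology.getDistanceByPath(h1, h2)); // 4: paths diverge at the rack
    System.out.println(NetworkTopology.getDistanceByPath(h1, h3)); // 2: same rack, different host
    System.out.println(NetworkTopology.getDistanceByPath(h1, h1)); // 0: identical paths
  }
}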

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java

@@ -113,6 +113,15 @@ public class NodeBase implements Node {
     return node.getNetworkLocation() + PATH_SEPARATOR_STR + node.getName();
   }
 
+  /**
+   * Get the path components of a node.
+   * @param node a non-null node
+   * @return the path components of the node
+   */
+  public static String[] getPathComponents(Node node) {
+    return getPath(node).split(PATH_SEPARATOR_STR);
+  }
+
   @Override
   public boolean equals(Object to) {
     if (this == to) {

+ 45 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java

@@ -25,6 +25,7 @@ import org.apache.directory.shared.kerberos.components.EncryptionKey;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.ExitUtil;
@@ -52,6 +53,7 @@ import java.util.Collections;
 import java.util.Date;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.regex.Pattern;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
 import static org.apache.hadoop.security.UserGroupInformation.*;
@@ -121,6 +123,12 @@ public class KDiag extends Configured implements Tool, Closeable {
   private boolean nofail = false;
   private boolean nologin = false;
   private boolean jaas = false;
+  private boolean checkShortName = false;
+
+  /**
+   * A pattern that recognizes non-simple names (those containing '/' or '@'),
+   * per KerberosName.
+   */
+  private static final Pattern nonSimplePattern = Pattern.compile("[/@]");
 
   /**
    * Flag set to true if a {@link #verify(boolean, String, String, Object...)}
@@ -148,6 +156,8 @@ public class KDiag extends Configured implements Tool, Closeable {
 
   public static final String ARG_SECURE = "--secure";
 
+  public static final String ARG_VERIFYSHORTNAME = "--verifyshortname";
+
   @SuppressWarnings("IOResourceOpenedButNotSafelyClosed")
   public KDiag(Configuration conf,
       PrintWriter out,
@@ -191,6 +201,7 @@ public class KDiag extends Configured implements Tool, Closeable {
     nofail = popOption(ARG_NOFAIL, args);
     jaas = popOption(ARG_JAAS, args);
     nologin = popOption(ARG_NOLOGIN, args);
+    checkShortName = popOption(ARG_VERIFYSHORTNAME, args);
 
     // look for list of resources
     String resource;
@@ -236,7 +247,9 @@ public class KDiag extends Configured implements Tool, Closeable {
       + arg(ARG_NOLOGIN, "", "Do not attempt to log in")
       + arg(ARG_OUTPUT, "<file>", "Write output to a file")
       + arg(ARG_RESOURCE, "<resource>", "Load an XML configuration resource")
-      + arg(ARG_SECURE, "", "Require the hadoop configuration to be secure");
+      + arg(ARG_SECURE, "", "Require the hadoop configuration to be secure")
+      + arg(ARG_VERIFYSHORTNAME, ARG_PRINCIPAL + " <principal>",
+      "Verify the short name of the specific principal does not contain '@' or '/'");
   }
 
   private String arg(String name, String params, String meaning) {
@@ -269,6 +282,7 @@ public class KDiag extends Configured implements Tool, Closeable {
     println("%s = %d", ARG_KEYLEN, minKeyLength);
     println("%s = %s", ARG_KEYTAB, keytab);
     println("%s = %s", ARG_PRINCIPAL, principal);
+    println("%s = %s", ARG_VERIFYSHORTNAME, checkShortName);
 
     // Fail fast on a JVM without JCE installed.
     validateKeyLength();
@@ -366,6 +380,10 @@ public class KDiag extends Configured implements Tool, Closeable {
       validateJAAS(jaas);
       validateNTPConf();
 
+      if (checkShortName) {
+        validateShortName();
+      }
+
       if (!nologin) {
         title("Logging in");
         if (keytab != null) {
@@ -419,6 +437,32 @@ public class KDiag extends Configured implements Tool, Closeable {
         aesLen, minKeyLength);
   }
 
+  /**
+   * Verify whether the auth_to_local rules transform a principal name.
+   * <p>
+   * Having a local user name "bar@foo.com" may be harmless, so it is noted
+   * at info level. However, if what was intended was a transformation to
+   * "bar", it can be difficult to debug; hence this check.
+   */
+  protected void validateShortName() {
+    failif(principal == null, CAT_KERBEROS, "No principal defined");
+
+    try {
+      KerberosName kn = new KerberosName(principal);
+      String result = kn.getShortName();
+      if (nonSimplePattern.matcher(result).find()) {
+        warn(CAT_KERBEROS, principal + " short name: " + result
+                + " still contains @ or /");
+      }
+    } catch (IOException e) {
+      throw new KerberosDiagsFailure(CAT_KERBEROS, e,
+              "Failed to get short name for " + principal, e);
+    } catch (IllegalArgumentException e) {
+      error(CAT_KERBEROS, "KerberosName(" + principal + ") failed: %s\n%s",
+              e, StringUtils.stringifyException(e));
+    }
+  }
+
   /**
    * Get the default realm.
    * <p>
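KDiag's new --verifyshortname check (used together with --principal, per the usage string above) can be reproduced directly against KerberosName; a minimal sketch, assuming the auth_to_local rules have already been loaded (for example via KerberosName.setRules()) and using an illustrative principal:

import java.util.regex.Pattern;
import org.apache.hadoop.security.authentication.util.KerberosName;

public class ShortNameCheck {
  public static void main(String[] args) throws Exception {
    String principal = "hdfs/nn1.example.com@EXAMPLE.COM";   // illustrative
    String shortName = new KerberosName(principal).getShortName();
    if (Pattern.compile("[/@]").matcher(shortName).find()) {
      System.out.println(shortName + " still contains '@' or '/'");
    } else {
      System.out.println("short name: " + shortName);
    }
  }
}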

+ 197 - 72
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java

@@ -18,15 +18,14 @@
 package org.apache.hadoop.security;
 
 import java.io.FileInputStream;
-import java.io.FileReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.Reader;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Hashtable;
 import java.util.List;
 
-import javax.naming.CommunicationException;
 import javax.naming.Context;
 import javax.naming.NamingEnumeration;
 import javax.naming.NamingException;
@@ -35,6 +34,8 @@ import javax.naming.directory.DirContext;
 import javax.naming.directory.InitialDirContext;
 import javax.naming.directory.SearchControls;
 import javax.naming.directory.SearchResult;
+import javax.naming.ldap.LdapName;
+import javax.naming.ldap.Rdn;
 
 import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
@@ -43,7 +44,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
 
 /**
  * An implementation of {@link GroupMappingServiceProvider} which
@@ -137,6 +137,13 @@ public class LdapGroupsMapping
   public static final String GROUP_SEARCH_FILTER_KEY = LDAP_CONFIG_PREFIX + ".search.filter.group";
   public static final String GROUP_SEARCH_FILTER_DEFAULT = "(objectClass=group)";
 
+  /*
+   * LDAP attribute on the user object that holds the DNs of the groups the
+   * user belongs to (for example, memberOf on Active Directory). When set,
+   * group names are resolved from the user object in a single query.
+   */
+  public static final String MEMBEROF_ATTR_KEY =
+      LDAP_CONFIG_PREFIX + ".search.attr.memberof";
+  public static final String MEMBEROF_ATTR_DEFAULT = "";
+
   /*
    * LDAP attribute to use for determining group membership
    */
@@ -191,13 +198,15 @@ public class LdapGroupsMapping
   private String baseDN;
   private String groupSearchFilter;
   private String userSearchFilter;
+  private String memberOfAttr;
   private String groupMemberAttr;
   private String groupNameAttr;
   private String posixUidAttr;
   private String posixGidAttr;
   private boolean isPosix;
+  private boolean useOneQuery;
 
-  public static int RECONNECT_RETRY_COUNT = 3;
+  public static final int RECONNECT_RETRY_COUNT = 3;
   
   /**
    * Returns list of groups for a user.
@@ -210,93 +219,197 @@ public class LdapGroupsMapping
    * @return list of groups for a given user
    */
   @Override
-  public synchronized List<String> getGroups(String user) throws IOException {
-    List<String> emptyResults = new ArrayList<String>();
+  public synchronized List<String> getGroups(String user) {
     /*
      * Normal garbage collection takes care of removing Context instances when they are no longer in use. 
      * Connections used by Context instances being garbage collected will be closed automatically.
      * So in case connection is closed and gets CommunicationException, retry some times with new new DirContext/connection. 
      */
-    try {
-      return doGetGroups(user);
-    } catch (CommunicationException e) {
-      LOG.warn("Connection is closed, will try to reconnect");
-    } catch (NamingException e) {
-      LOG.warn("Exception trying to get groups for user " + user + ": "
-          + e.getMessage());
-      return emptyResults;
-    }
-
-    int retryCount = 0;
-    while (retryCount ++ < RECONNECT_RETRY_COUNT) {
-      //reset ctx so that new DirContext can be created with new connection
-      this.ctx = null;
-      
+    for(int retry = 0; retry < RECONNECT_RETRY_COUNT; retry++) {
       try {
         return doGetGroups(user);
-      } catch (CommunicationException e) {
-        LOG.warn("Connection being closed, reconnecting failed, retryCount = " + retryCount);
       } catch (NamingException e) {
-        LOG.warn("Exception trying to get groups for user " + user + ":"
-            + e.getMessage());
-        return emptyResults;
+        LOG.warn("Failed to get groups for user " + user + " (retry=" + retry
+            + ") by " + e);
+        LOG.trace("TRACE", e);
       }
+
+      //reset ctx so that new DirContext can be created with new connection
+      this.ctx = null;
     }
     
-    return emptyResults;
+    return Collections.emptyList();
   }
-  
-  List<String> doGetGroups(String user) throws NamingException {
-    List<String> groups = new ArrayList<String>();
 
-    DirContext ctx = getDirContext();
+  /**
+   * A helper method to get the Relative Distinguished Name (RDN) from
+   * Distinguished name (DN). According to Active Directory documentation,
+   * a group object's RDN is a CN.
+   *
+   * @param distinguishedName A string representing a distinguished name.
+   * @throws NamingException if the DN is malformed.
+   * @return a string which represents the RDN
+   */
+  private String getRelativeDistinguishedName(String distinguishedName)
+      throws NamingException {
+    LdapName ldn = new LdapName(distinguishedName);
+    List<Rdn> rdns = ldn.getRdns();
+    if (rdns.isEmpty()) {
+      throw new NamingException("DN is empty");
+    }
+    Rdn rdn = rdns.get(rdns.size()-1);
+    if (rdn.getType().equalsIgnoreCase(groupNameAttr)) {
+      String groupName = (String)rdn.getValue();
+      return groupName;
+    }
+    throw new NamingException("Unable to find RDN: The DN " +
+        distinguishedName + " is malformed.");
+  }
 
-    // Search for the user. We'll only ever need to look at the first result
-    NamingEnumeration<SearchResult> results = ctx.search(baseDN,
-        userSearchFilter,
-        new Object[]{user},
-        SEARCH_CONTROLS);
-    if (results.hasMoreElements()) {
-      SearchResult result = results.nextElement();
-      String userDn = result.getNameInNamespace();
+  /**
+   * Look up groups using posixGroups semantics. Use posix gid/uid to find
+   * groups of the user.
+   *
+   * @param result the result object returned from the prior user lookup.
+   * @param c the context object of the LDAP connection.
+   * @return an object representing the search result.
+   *
+   * @throws NamingException if the server does not support posixGroups
+   * semantics.
+   */
+  private NamingEnumeration<SearchResult> lookupPosixGroup(SearchResult result,
+      DirContext c) throws NamingException {
+    String gidNumber = null;
+    String uidNumber = null;
+    Attribute gidAttribute = result.getAttributes().get(posixGidAttr);
+    Attribute uidAttribute = result.getAttributes().get(posixUidAttr);
+    String reason = "";
+    if (gidAttribute == null) {
+      reason = "Can't find attribute '" + posixGidAttr + "'.";
+    } else {
+      gidNumber = gidAttribute.get().toString();
+    }
+    if (uidAttribute == null) {
+      reason = "Can't find attribute '" + posixUidAttr + "'.";
+    } else {
+      uidNumber = uidAttribute.get().toString();
+    }
+    if (uidNumber != null && gidNumber != null) {
+      return c.search(baseDN,
+              "(&"+ groupSearchFilter + "(|(" + posixGidAttr + "={0})" +
+                  "(" + groupMemberAttr + "={1})))",
+              new Object[] {gidNumber, uidNumber},
+              SEARCH_CONTROLS);
+    }
+    throw new NamingException("The server does not support posixGroups " +
+        "semantics. Reason: " + reason +
+        " Returned user object: " + result.toString());
+  }
 
-      NamingEnumeration<SearchResult> groupResults = null;
+  /**
+   * Perform the second query to get the groups of the user.
+   *
+   * If posixGroups is enabled, use the posix gid/uid to find the groups.
+   * Otherwise, use the general group member attribute to find them.
+   *
+   * @param result the result object returned from the prior user lookup.
+   * @param c the context object of the LDAP connection.
+   * @return a list of strings representing group names of the user.
+   * @throws NamingException if unable to find group names
+   */
+  private List<String> lookupGroup(SearchResult result, DirContext c)
+      throws NamingException {
+    List<String> groups = new ArrayList<String>();
 
-      if (isPosix) {
-        String gidNumber = null;
-        String uidNumber = null;
-        Attribute gidAttribute = result.getAttributes().get(posixGidAttr);
-        Attribute uidAttribute = result.getAttributes().get(posixUidAttr);
-        if (gidAttribute != null) {
-          gidNumber = gidAttribute.get().toString();
-        }
-        if (uidAttribute != null) {
-          uidNumber = uidAttribute.get().toString();
-        }
-        if (uidNumber != null && gidNumber != null) {
-          groupResults =
-              ctx.search(baseDN,
-                  "(&"+ groupSearchFilter + "(|(" + posixGidAttr + "={0})" +
-                      "(" + groupMemberAttr + "={1})))",
-                  new Object[] { gidNumber, uidNumber },
-                  SEARCH_CONTROLS);
+    NamingEnumeration<SearchResult> groupResults = null;
+    // perform the second LDAP query
+    if (isPosix) {
+      groupResults = lookupPosixGroup(result, c);
+    } else {
+      String userDn = result.getNameInNamespace();
+      groupResults =
+          c.search(baseDN,
+              "(&" + groupSearchFilter + "(" + groupMemberAttr + "={0}))",
+              new Object[]{userDn},
+              SEARCH_CONTROLS);
+    }
+    // if the second query is successful, group objects of the user will be
+    // returned. Get group names from the returned objects.
+    if (groupResults != null) {
+      while (groupResults.hasMoreElements()) {
+        SearchResult groupResult = groupResults.nextElement();
+        Attribute groupName = groupResult.getAttributes().get(groupNameAttr);
+        if (groupName == null) {
+          throw new NamingException("The group object does not have " +
+              "attribute '" + groupNameAttr + "'.");
         }
-      } else {
-        groupResults =
-            ctx.search(baseDN,
-                "(&" + groupSearchFilter + "(" + groupMemberAttr + "={0}))",
-                new Object[]{userDn},
-                SEARCH_CONTROLS);
+        groups.add(groupName.get().toString());
       }
-      if (groupResults != null) {
-        while (groupResults.hasMoreElements()) {
-          SearchResult groupResult = groupResults.nextElement();
-          Attribute groupName = groupResult.getAttributes().get(groupNameAttr);
-          groups.add(groupName.get().toString());
-        }
+    }
+    return groups;
+  }
+
+  /**
+   * Perform LDAP queries to get group names of a user.
+   *
+   * Perform the first LDAP query to get the user object using the user's name.
+   * If one-query is enabled, retrieve the group names from the user object.
+   * If one-query is disabled, or if it failed, perform the second query to
+   * get the groups.
+   *
+   * @param user user name
+   * @return a list of group names for the user. If the user cannot be found,
+   * return an empty list.
+   * @throws NamingException if unable to get group names
+   */
+  List<String> doGetGroups(String user) throws NamingException {
+    DirContext c = getDirContext();
+
+    // Search for the user. We'll only ever need to look at the first result
+    NamingEnumeration<SearchResult> results = c.search(baseDN,
+        userSearchFilter, new Object[]{user}, SEARCH_CONTROLS);
+    // return empty list if the user can not be found.
+    if (!results.hasMoreElements()) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("doGetGroups(" + user + ") return no groups because the " +
+            "user is not found.");
       }
+      return new ArrayList<String>();
     }
+    SearchResult result = results.nextElement();
 
+    List<String> groups = null;
+    if (useOneQuery) {
+      try {
+        /**
+         * For Active Directory servers, the user object has an attribute
+         * 'memberOf' that represents the DNs of group objects to which the
+         * user belongs. So the second query may be skipped.
+         */
+        Attribute groupDNAttr = result.getAttributes().get(memberOfAttr);
+        if (groupDNAttr == null) {
+          throw new NamingException("The user object does not have '" +
+              memberOfAttr + "' attribute. " +
+              "Returned user object: " + result.toString());
+        }
+        groups = new ArrayList<String>();
+        NamingEnumeration groupEnumeration = groupDNAttr.getAll();
+        while (groupEnumeration.hasMore()) {
+          String groupDN = groupEnumeration.next().toString();
+          groups.add(getRelativeDistinguishedName(groupDN));
+        }
+      } catch (NamingException e) {
+        // If the first lookup failed, fall back to the typical scenario.
+        LOG.info("Failed to get groups from the first lookup. Initiating " +
+                "the second LDAP query using the user's DN.", e);
+      }
+    }
+    if (groups == null || groups.isEmpty()) {
+      groups = lookupGroup(result, c);
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("doGetGroups(" + user + ") return " + groups);
+    }
     return groups;
   }
 
@@ -379,6 +492,11 @@ public class LdapGroupsMapping
         conf.get(USER_SEARCH_FILTER_KEY, USER_SEARCH_FILTER_DEFAULT);
     isPosix = groupSearchFilter.contains(POSIX_GROUP) && userSearchFilter
         .contains(POSIX_ACCOUNT);
+    memberOfAttr =
+        conf.get(MEMBEROF_ATTR_KEY, MEMBEROF_ATTR_DEFAULT);
+    // if memberOf attribute is set, resolve group names from the attribute
+    // of user objects.
+    useOneQuery = !memberOfAttr.isEmpty();
     groupMemberAttr =
         conf.get(GROUP_MEMBERSHIP_ATTR_KEY, GROUP_MEMBERSHIP_ATTR_DEFAULT);
     groupNameAttr =
@@ -392,8 +510,15 @@ public class LdapGroupsMapping
     SEARCH_CONTROLS.setTimeLimit(dirSearchTimeout);
     // Limit the attributes returned to only those required to speed up the search.
     // See HADOOP-10626 and HADOOP-12001 for more details.
-    SEARCH_CONTROLS.setReturningAttributes(
-        new String[] {groupNameAttr, posixUidAttr, posixGidAttr});
+    String[] returningAttributes;
+    if (useOneQuery) {
+      returningAttributes = new String[] {
+          groupNameAttr, posixUidAttr, posixGidAttr, memberOfAttr};
+    } else {
+      returningAttributes = new String[] {
+          groupNameAttr, posixUidAttr, posixGidAttr};
+    }
+    SEARCH_CONTROLS.setReturningAttributes(returningAttributes);
 
     this.conf = conf;
   }
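A sketch of driving the new single-query path from client code, not part of the patch; the URL, base DN and user are illustrative, and a reachable server plus any required bind settings are assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.LdapGroupsMapping;

public class LdapOneQueryDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://ad.example.com");
    conf.set(LdapGroupsMapping.BASE_DN_KEY, "dc=example,dc=com");
    // Setting the memberOf attribute enables useOneQuery: group DNs are read
    // from the user object instead of issuing a second group search.
    conf.set(LdapGroupsMapping.MEMBEROF_ATTR_KEY, "memberOf");

    LdapGroupsMapping mapping = new LdapGroupsMapping();
    mapping.setConf(conf);
    System.out.println(mapping.getGroups("alice"));
  }
}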

+ 75 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java

@@ -19,9 +19,13 @@
 package org.apache.hadoop.security;
 
 import java.io.IOException;
+import java.io.InputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.net.URL;
 
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -36,6 +40,23 @@ import org.apache.hadoop.security.alias.LocalJavaKeyStoreProvider;
  *
  */
 public final class ProviderUtils {
+  @VisibleForTesting
+  public static final String NO_PASSWORD_WARN =
+      "WARNING: You have accepted the use of the default provider password\n" +
+      "by not configuring a password in one of the two following locations:\n";
+  @VisibleForTesting
+  public static final String NO_PASSWORD_ERROR =
+      "ERROR: The provider cannot find a password in the expected " +
+      "locations.\nPlease supply a password using one of the " +
+      "following two mechanisms:\n";
+  @VisibleForTesting
+  public static final String NO_PASSWORD_CONT =
+      "Continuing with the default provider password.\n";
+  @VisibleForTesting
+  public static final String NO_PASSWORD_INSTRUCTIONS_DOC =
+      "Please review the documentation regarding provider passwords in\n" +
+      "the keystore passwords section of the Credential Provider API\n";
+
   private static final Log LOG = LogFactory.getLog(ProviderUtils.class);
 
   /**
@@ -174,4 +195,58 @@ public final class ProviderUtils {
     }
     return conf;
   }
+
+  /**
+   * The password is either found in the environment or in a file. This
+   * routine implements the logic for locating the password in these
+   * locations.
+   *
+   * @param envWithPass  The name of the environment variable that might
+   *                     contain the password. Must not be null.
+   * @param fileWithPass The name of a file that could contain the password.
+   *                     Can be null.
+   * @return The password as a char []; null if not found.
+   * @throws IOException If fileWithPass is non-null and points to a
+   * nonexistent file or a file that fails to open and be read properly.
+   */
+  public static char[] locatePassword(String envWithPass, String fileWithPass)
+      throws IOException {
+    char[] pass = null;
+    // Get the password file from the conf, if not present from the user's
+    // environment var
+    if (System.getenv().containsKey(envWithPass)) {
+      pass = System.getenv(envWithPass).toCharArray();
+    }
+    if (pass == null) {
+      if (fileWithPass != null) {
+        ClassLoader cl = Thread.currentThread().getContextClassLoader();
+        URL pwdFile = cl.getResource(fileWithPass);
+        if (pwdFile == null) {
+          // Provided Password file does not exist
+          throw new IOException("Password file does not exist");
+        }
+        try (InputStream is = pwdFile.openStream()) {
+          pass = IOUtils.toString(is).trim().toCharArray();
+        }
+      }
+    }
+    return pass;
+  }
+
+  private static String noPasswordInstruction(String envKey, String fileKey) {
+    return
+        "    * In the environment variable " + envKey + "\n" +
+        "    * In a file referred to by the configuration entry\n" +
+        "      " + fileKey + ".\n" +
+        NO_PASSWORD_INSTRUCTIONS_DOC;
+  }
+
+  public static String noPasswordWarning(String envKey, String fileKey) {
+    return NO_PASSWORD_WARN + noPasswordInstruction(envKey, fileKey) +
+        NO_PASSWORD_CONT;
+  }
+
+  public static String noPasswordError(String envKey, String fileKey) {
+    return NO_PASSWORD_ERROR + noPasswordInstruction(envKey, fileKey);
+  }
 }
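A sketch of how a provider is expected to use the new helper; the environment variable and configuration key shown are the credential-provider ones used elsewhere in this change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ProviderUtils;

public class PasswordLookupDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Checks the environment variable first, then a password file located on
    // the classpath via the configuration key; returns null if neither is set.
    char[] pass = ProviderUtils.locatePassword(
        "HADOOP_CREDSTORE_PASSWORD",
        conf.get("hadoop.security.credstore.java-keystore-provider.password-file"));
    if (pass == null) {
      System.out.println(ProviderUtils.noPasswordWarning(
          "HADOOP_CREDSTORE_PASSWORD",
          "hadoop.security.credstore.java-keystore-provider.password-file"));
    }
  }
}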

+ 11 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java

@@ -179,7 +179,7 @@ public class ShellBasedIdMapping implements IdMappingServiceProvider {
       + "The host system with duplicated user/group name or id might work fine most of the time by itself.\n"
       + "However when NFS gateway talks to HDFS, HDFS accepts only user and group name.\n"
       + "Therefore, same name means the same user or same group. To find the duplicated names/ids, one can do:\n"
-      + "<getent passwd | cut -d: -f1,3> and <getent group | cut -d: -f1,3> on Linux systems,\n"
+      + "<getent passwd | cut -d: -f1,3> and <getent group | cut -d: -f1,3> on Linux, BSD and Solaris systems,\n"
       + "<dscl . -list /Users UniqueID> and <dscl . -list /Groups PrimaryGroupID> on MacOS.";
   
   private static void reportDuplicateEntry(final String header,
@@ -273,7 +273,8 @@ public class ShellBasedIdMapping implements IdMappingServiceProvider {
   }
 
   private boolean checkSupportedPlatform() {
-    if (!OS.startsWith("Linux") && !OS.startsWith("Mac")) {
+    if (!OS.startsWith("Linux") && !OS.startsWith("Mac")
+        && !OS.equals("SunOS") && !OS.contains("BSD")) {
       LOG.error("Platform is not supported:" + OS
           + ". Can't update user map and group map and"
           + " 'nobody' will be used for any user and group.");
@@ -385,7 +386,7 @@ public class ShellBasedIdMapping implements IdMappingServiceProvider {
   // OR
   //     id -u <name> | awk '{print "<name>:"$1 }'
   //
-  private String getName2IdCmdLinux(final String name, final boolean isGrp) {
+  private String getName2IdCmdNIX(final String name, final boolean isGrp) {
     String cmd;
     if (isGrp) {
       cmd = "getent group " + name + " | cut -d: -f1,3";   
@@ -396,7 +397,7 @@ public class ShellBasedIdMapping implements IdMappingServiceProvider {
   }
   
   // search for name with given id, return "<name>:<id>"
-  private String getId2NameCmdLinux(final int id, final boolean isGrp) {
+  private String getId2NameCmdNIX(final int id, final boolean isGrp) {
     String cmd = "getent ";
     cmd += isGrp? "group " : "passwd ";
     cmd += String.valueOf(id) + " | cut -d: -f1,3";
@@ -466,14 +467,14 @@ public class ShellBasedIdMapping implements IdMappingServiceProvider {
     boolean updated = false;
     updateStaticMapping();
 
-    if (OS.startsWith("Linux")) {
+    if (OS.startsWith("Linux") || OS.equals("SunOS") || OS.contains("BSD")) {
       if (isGrp) {
         updated = updateMapInternal(gidNameMap, "group",
-            getName2IdCmdLinux(name, true), ":",
+            getName2IdCmdNIX(name, true), ":",
             staticMapping.gidMapping);
       } else {
         updated = updateMapInternal(uidNameMap, "user",
-            getName2IdCmdLinux(name, false), ":",
+            getName2IdCmdNIX(name, false), ":",
             staticMapping.uidMapping);
       }
     } else {
@@ -502,14 +503,14 @@ public class ShellBasedIdMapping implements IdMappingServiceProvider {
     boolean updated = false;
     updateStaticMapping();
 
-    if (OS.startsWith("Linux")) {
+    if (OS.startsWith("Linux") || OS.equals("SunOS") || OS.contains("BSD")) {
       if (isGrp) {
         updated = updateMapInternal(gidNameMap, "group",
-            getId2NameCmdLinux(id, true), ":",
+            getId2NameCmdNIX(id, true), ":",
             staticMapping.gidMapping);
       } else {
         updated = updateMapInternal(uidNameMap, "user",
-            getId2NameCmdLinux(id, false), ":",
+            getId2NameCmdNIX(id, false), ":",
             staticMapping.uidMapping);
       }
     } else {

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -1613,9 +1613,11 @@ public class UserGroupInformation {
       return result.toArray(new String[result.size()]);
     } catch (IOException ie) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("No groups available for user " + getShortUserName());
+        LOG.debug("Failed to get groups for user " + getShortUserName()
+            + " by " + ie);
+        LOG.trace("TRACE", ie);
       }
-      return new String[0];
+      return StringUtils.emptyStringArray;
     }
   }
   

+ 66 - 47
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.security.alias;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -33,7 +32,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.URI;
-import java.net.URL;
+import java.security.GeneralSecurityException;
 import java.security.KeyStore;
 import java.security.KeyStoreException;
 import java.security.NoSuchAlgorithmException;
@@ -62,68 +61,36 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 public abstract class AbstractJavaKeyStoreProvider extends CredentialProvider {
   public static final Log LOG = LogFactory.getLog(
       AbstractJavaKeyStoreProvider.class);
-  public static final String CREDENTIAL_PASSWORD_NAME =
+  public static final String CREDENTIAL_PASSWORD_ENV_VAR =
       "HADOOP_CREDSTORE_PASSWORD";
-  public static final String KEYSTORE_PASSWORD_FILE_KEY =
+  public static final String CREDENTIAL_PASSWORD_FILE_KEY =
       "hadoop.security.credstore.java-keystore-provider.password-file";
-  public static final String KEYSTORE_PASSWORD_DEFAULT = "none";
+  public static final String CREDENTIAL_PASSWORD_DEFAULT = "none";
 
   private Path path;
   private final URI uri;
-  private final KeyStore keyStore;
+  private KeyStore keyStore;
   private char[] password = null;
   private boolean changed = false;
   private Lock readLock;
   private Lock writeLock;
+  private final Configuration conf;
 
   protected AbstractJavaKeyStoreProvider(URI uri, Configuration conf)
       throws IOException {
     this.uri = uri;
-    initFileSystem(uri, conf);
-    // Get the password from the user's environment
-    if (System.getenv().containsKey(CREDENTIAL_PASSWORD_NAME)) {
-      password = System.getenv(CREDENTIAL_PASSWORD_NAME).toCharArray();
-    }
-    // if not in ENV get check for file
-    if (password == null) {
-      String pwFile = conf.get(KEYSTORE_PASSWORD_FILE_KEY);
-      if (pwFile != null) {
-        ClassLoader cl = Thread.currentThread().getContextClassLoader();
-        URL pwdFile = cl.getResource(pwFile);
-        if (pwdFile != null) {
-          try (InputStream is = pwdFile.openStream()) {
-            password = IOUtils.toString(is).trim().toCharArray();
-          }
-        }
-      }
-    }
-    if (password == null) {
-      password = KEYSTORE_PASSWORD_DEFAULT.toCharArray();
-    }
-    try {
-      keyStore = KeyStore.getInstance("jceks");
-      if (keystoreExists()) {
-        stashOriginalFilePermissions();
-        try (InputStream in = getInputStreamForFile()) {
-          keyStore.load(in, password);
-        }
-      } else {
-        createPermissions("700");
-        // required to create an empty keystore. *sigh*
-        keyStore.load(null, password);
-      }
-    } catch (KeyStoreException e) {
-      throw new IOException("Can't create keystore", e);
-    } catch (NoSuchAlgorithmException e) {
-      throw new IOException("Can't load keystore " + getPathAsString(), e);
-    } catch (CertificateException e) {
-      throw new IOException("Can't load keystore " + getPathAsString(), e);
-    }
+    this.conf = conf;
+    initFileSystem(uri);
+    locateKeystore();
     ReadWriteLock lock = new ReentrantReadWriteLock(true);
     readLock = lock.readLock();
     writeLock = lock.writeLock();
   }
 
+  protected Configuration getConf() {
+    return conf;
+  }
+
   public Path getPath() {
     return path;
   }
@@ -189,7 +156,7 @@ public abstract class AbstractJavaKeyStoreProvider extends CredentialProvider {
 
   protected abstract void stashOriginalFilePermissions() throws IOException;
 
-  protected void initFileSystem(URI keystoreUri, Configuration conf)
+  protected void initFileSystem(URI keystoreUri)
       throws IOException {
     path = ProviderUtils.unnestUri(keystoreUri);
     if (LOG.isDebugEnabled()) {
@@ -332,6 +299,58 @@ public abstract class AbstractJavaKeyStoreProvider extends CredentialProvider {
     }
   }
 
+  /**
+   * Open up and initialize the keyStore.
+   *
+   * @throws IOException If there is a problem reading the password file
+   * or a problem reading the keystore.
+   */
+  private void locateKeystore() throws IOException {
+    try {
+      password = ProviderUtils.locatePassword(CREDENTIAL_PASSWORD_ENV_VAR,
+          conf.get(CREDENTIAL_PASSWORD_FILE_KEY));
+      if (password == null) {
+        password = CREDENTIAL_PASSWORD_DEFAULT.toCharArray();
+      }
+      KeyStore ks;
+      ks = KeyStore.getInstance("jceks");
+      if (keystoreExists()) {
+        stashOriginalFilePermissions();
+        try (InputStream in = getInputStreamForFile()) {
+          ks.load(in, password);
+        }
+      } else {
+        createPermissions("600");
+        // required to create an empty keystore. *sigh*
+        ks.load(null, password);
+      }
+      keyStore = ks;
+    } catch (KeyStoreException e) {
+      throw new IOException("Can't create keystore", e);
+    } catch (GeneralSecurityException e) {
+      throw new IOException("Can't load keystore " + getPathAsString(), e);
+    }
+  }
+
+  @Override
+  public boolean needsPassword() throws IOException {
+    return (null == ProviderUtils.locatePassword(CREDENTIAL_PASSWORD_ENV_VAR,
+        conf.get(CREDENTIAL_PASSWORD_FILE_KEY)));
+
+  }
+
+  @Override
+  public String noPasswordWarning() {
+    return ProviderUtils.noPasswordWarning(CREDENTIAL_PASSWORD_ENV_VAR,
+            CREDENTIAL_PASSWORD_FILE_KEY);
+  }
+
+  @Override
+  public String noPasswordError() {
+    return ProviderUtils.noPasswordError(CREDENTIAL_PASSWORD_ENV_VAR,
+        CREDENTIAL_PASSWORD_FILE_KEY);
+  }
+
   @Override
   public String toString() {
     return uri.toString();

+ 35 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProvider.java

@@ -36,7 +36,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceStability.Unstable
 public abstract class CredentialProvider {
   public static final String CLEAR_TEXT_FALLBACK 
-    = "hadoop.security.credential.clear-text-fallback";
+      = "hadoop.security.credential.clear-text-fallback";
 
   /**
    * The combination of both the alias and the actual credential value.
@@ -87,7 +87,8 @@ public abstract class CredentialProvider {
   }
 
   /**
-   * Ensures that any changes to the credentials are written to persistent store.
+   * Ensures that any changes to the credentials are written to persistent
+   * store.
    * @throws IOException
    */
   public abstract void flush() throws IOException;
@@ -123,4 +124,36 @@ public abstract class CredentialProvider {
    * @throws IOException
    */
   public abstract void deleteCredentialEntry(String name) throws IOException;
+
+  /**
+   * Does this provider require a password? This means that a password is
+   * required for normal operation, and it has not been found through normal
+   * means. If true, the password should be provided by the caller using
+   * setPassword().
+   * @return Whether or not the provider requires a password
+   * @throws IOException
+   */
+  public boolean needsPassword() throws IOException {
+    return false;
+  }
+
+  /**
+   * If a password for the provider is needed, but is not provided, this will
+   * return a warning and instructions for supplying said password to the
+   * provider.
+   * @return A warning and instructions for supplying the password
+   */
+  public String noPasswordWarning() {
+    return null;
+  }
+
+  /**
+   * If a password for the provider is needed, but is not provided, this will
+   * return an error message and instructions for supplying said password to
+   * the provider.
+   * @return An error message and instructions for supplying the password
+   */
+  public String noPasswordError() {
+    return null;
+  }
 }
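A sketch of the caller-side pattern these hooks enable, mirroring the shell's -strict handling (not part of the patch; the strict flag here is just a local boolean):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;

public class NeedsPasswordDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    boolean strict = true;   // illustrative; mirrors the shell's -strict flag
    List<CredentialProvider> providers =
        CredentialProviderFactory.getProviders(conf);
    for (CredentialProvider provider : providers) {
      if (provider.needsPassword()) {
        // Strict mode treats a missing password as an error; otherwise warn
        // and continue with the default provider password.
        System.err.println(strict
            ? provider.noPasswordError()
            : provider.noPasswordWarning());
      }
    }
  }
}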

+ 98 - 68
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java

@@ -26,6 +26,7 @@ import java.security.NoSuchAlgorithmException;
 import java.util.Arrays;
 import java.util.List;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.util.Tool;
@@ -37,24 +38,36 @@ import org.apache.hadoop.util.ToolRunner;
  */
 public class CredentialShell extends Configured implements Tool {
   final static private String USAGE_PREFIX = "Usage: hadoop credential " +
-  		"[generic options]\n";
+      "[generic options]\n";
   final static private String COMMANDS =
-      "   [--help]\n" +
+      "   [-help]\n" +
       "   [" + CreateCommand.USAGE + "]\n" +
       "   [" + DeleteCommand.USAGE + "]\n" +
       "   [" + ListCommand.USAGE + "]\n";
+  @VisibleForTesting
+  public static final String NO_VALID_PROVIDERS =
+      "There are no valid (non-transient) providers configured.\n" +
+      "No action has been taken. Use the -provider option to specify\n" +
+      "a provider. If you want to use a transient provider then you\n" +
+      "MUST use the -provider argument.";
 
   private boolean interactive = true;
   private Command command = null;
 
-  /** allows stdout to be captured if necessary */
+  /** If true, fail if the provider requires a password and none is given. */
+  private boolean strict = false;
+
+  /** Allows stdout to be captured if necessary. */
+  @VisibleForTesting
   public PrintStream out = System.out;
-  /** allows stderr to be captured if necessary */
+  /** Allows stderr to be captured if necessary. */
+  @VisibleForTesting
   public PrintStream err = System.err;
 
   private boolean userSuppliedProvider = false;
   private String value = null;
   private PasswordReader passwordReader;
+  private boolean isHelp = false;
 
   @Override
   public int run(String[] args) throws Exception {
@@ -64,10 +77,12 @@ public class CredentialShell extends Configured implements Tool {
       if (exitCode != 0) {
         return exitCode;
       }
-      if (command.validate()) {
+      if (!isHelp) {
+        if (command.validate()) {
           command.execute();
-      } else {
-        exitCode = 1;
+        } else {
+          exitCode = 1;
+        }
       }
     } catch (Exception e) {
       e.printStackTrace(err);
@@ -77,7 +92,7 @@ public class CredentialShell extends Configured implements Tool {
   }
 
   /**
-   * Parse the command line arguments and initialize the data
+   * Parse the command line arguments and initialize the data.
    * <pre>
    * % hadoop credential create alias [-provider providerPath]
    * % hadoop credential list [-provider providerPath]
@@ -130,6 +145,8 @@ public class CredentialShell extends Configured implements Tool {
             args[++i]);
       } else if (args[i].equals("-f") || (args[i].equals("-force"))) {
         interactive = false;
+      } else if (args[i].equals("-strict")) {
+        strict = true;
       } else if (args[i].equals("-v") || (args[i].equals("-value"))) {
         value = args[++i];
       } else if (args[i].equals("-help")) {
@@ -145,13 +162,13 @@ public class CredentialShell extends Configured implements Tool {
   }
 
   private void printCredShellUsage() {
+    isHelp = true;
     out.println(USAGE_PREFIX + COMMANDS);
     if (command != null) {
       out.println(command.getUsage());
-    }
-    else {
+    } else {
       out.println("=========================================================" +
-      		"======");
+          "======");
       out.println(CreateCommand.USAGE + ":\n\n" + CreateCommand.DESC);
       out.println("=========================================================" +
           "======");
@@ -170,17 +187,16 @@ public class CredentialShell extends Configured implements Tool {
     }
 
     protected CredentialProvider getCredentialProvider() {
-      CredentialProvider provider = null;
+      CredentialProvider prov = null;
       List<CredentialProvider> providers;
       try {
         providers = CredentialProviderFactory.getProviders(getConf());
         if (userSuppliedProvider) {
-          provider = providers.get(0);
-        }
-        else {
+          prov = providers.get(0);
+        } else {
           for (CredentialProvider p : providers) {
             if (!p.isTransient()) {
-              provider = p;
+              prov = p;
               break;
             }
           }
@@ -188,11 +204,14 @@ public class CredentialShell extends Configured implements Tool {
       } catch (IOException e) {
         e.printStackTrace(err);
       }
-      return provider;
+      if (prov == null) {
+        out.println(NO_VALID_PROVIDERS);
+      }
+      return prov;
     }
 
     protected void printProviderWritten() {
-        out.println(provider.getClass().getName() + " has been updated.");
+      out.println("Provider " + provider.toString() + " has been updated.");
     }
 
     protected void warnIfTransientProvider() {
@@ -207,35 +226,32 @@ public class CredentialShell extends Configured implements Tool {
   }
 
   private class ListCommand extends Command {
-    public static final String USAGE = "list [-provider provider-path]";
+    public static final String USAGE =
+        "list [-provider provider-path] [-strict]";
     public static final String DESC =
         "The list subcommand displays the aliases contained within \n" +
-        "a particular provider - as configured in core-site.xml or " +
-        "indicated\nthrough the -provider argument.";
+        "a particular provider - as configured in core-site.xml or\n" +
+        "indicated through the -provider argument. If -strict is supplied,\n" +
+        "fail immediately if the provider requires a password and none is\n" +
+        "provided.";
 
     public boolean validate() {
-      boolean rc = true;
       provider = getCredentialProvider();
-      if (provider == null) {
-        out.println("There are no non-transient CredentialProviders configured.\n"
-            + "Consider using the -provider option to indicate the provider\n"
-            + "to use. If you want to list a transient provider then you\n"
-            + "you MUST use the -provider argument.");
-        rc = false;
-      }
-      return rc;
+      return (provider != null);
     }
 
     public void execute() throws IOException {
       List<String> aliases;
       try {
         aliases = provider.getAliases();
-        out.println("Listing aliases for CredentialProvider: " + provider.toString());
+        out.println("Listing aliases for CredentialProvider: " +
+            provider.toString());
         for (String alias : aliases) {
           out.println(alias);
         }
       } catch (IOException e) {
-        out.println("Cannot list aliases for CredentialProvider: " + provider.toString()
+        out.println("Cannot list aliases for CredentialProvider: " +
+            provider.toString()
             + ": " + e.getMessage());
         throw e;
       }
@@ -249,15 +265,17 @@ public class CredentialShell extends Configured implements Tool {
 
   private class DeleteCommand extends Command {
     public static final String USAGE =
-        "delete <alias> [-f] [-provider provider-path]";
+        "delete <alias> [-f] [-provider provider-path] [-strict]";
     public static final String DESC =
         "The delete subcommand deletes the credential\n" +
         "specified as the <alias> argument from within the provider\n" +
         "indicated through the -provider argument. The command asks for\n" +
-        "confirmation unless the -f option is specified.";
+        "confirmation unless the -f option is specified. If -strict is\n" +
+        "supplied, fail immediately if the provider requires a password\n" +
+        "and none is given.";
 
-    String alias = null;
-    boolean cont = true;
+    private String alias = null;
+    private boolean cont = true;
 
     public DeleteCommand(String alias) {
       this.alias = alias;
@@ -267,10 +285,6 @@ public class CredentialShell extends Configured implements Tool {
     public boolean validate() {
       provider = getCredentialProvider();
       if (provider == null) {
-        out.println("There are no valid CredentialProviders configured.\n"
-            + "Nothing will be deleted.\n"
-            + "Consider using the -provider option to indicate the provider"
-            + " to use.");
         return false;
       }
       if (alias == null) {
@@ -298,16 +312,17 @@ public class CredentialShell extends Configured implements Tool {
 
     public void execute() throws IOException {
       warnIfTransientProvider();
-      out.println("Deleting credential: " + alias + " from CredentialProvider: "
-          + provider.toString());
+      out.println("Deleting credential: " + alias +
+          " from CredentialProvider: " + provider.toString());
       if (cont) {
         try {
           provider.deleteCredentialEntry(alias);
-          out.println(alias + " has been successfully deleted.");
+          out.println("Credential " + alias +
+              " has been successfully deleted.");
           provider.flush();
           printProviderWritten();
         } catch (IOException e) {
-          out.println(alias + " has NOT been deleted.");
+          out.println("Credential " + alias + " has NOT been deleted.");
           throw e;
         }
       }
@@ -320,14 +335,17 @@ public class CredentialShell extends Configured implements Tool {
   }
 
   private class CreateCommand extends Command {
-    public static final String USAGE =
-        "create <alias> [-provider provider-path]";
+    public static final String USAGE = "create <alias> [-value alias-value] " +
+        "[-provider provider-path] [-strict]";
     public static final String DESC =
-        "The create subcommand creates a new credential for the name specified\n" +
-        "as the <alias> argument within the provider indicated through\n" +
-        "the -provider argument.";
+        "The create subcommand creates a new credential for the name\n" +
+        "specified as the <alias> argument within the provider indicated\n" +
+        "through the -provider argument. If -strict is supplied, fail\n" +
+        "immediately if the provider requires a password and none is given.\n" +
+        "If -value is provided, use that for the value of the credential\n" +
+        "instead of prompting the user.";
 
-    String alias = null;
+    private String alias = null;
 
     public CreateCommand(String alias) {
       this.alias = alias;
@@ -335,13 +353,20 @@ public class CredentialShell extends Configured implements Tool {
 
     public boolean validate() {
       boolean rc = true;
-      provider = getCredentialProvider();
-      if (provider == null) {
-        out.println("There are no valid CredentialProviders configured." +
-        		"\nCredential will not be created.\n"
-            + "Consider using the -provider option to indicate the provider" +
-            " to use.");
-        rc = false;
+      try {
+        provider = getCredentialProvider();
+        if (provider == null) {
+          rc = false;
+        } else if (provider.needsPassword()) {
+          if (strict) {
+            out.println(provider.noPasswordError());
+            rc = false;
+          } else {
+            out.println(provider.noPasswordWarning());
+          }
+        }
+      } catch (IOException e) {
+        e.printStackTrace(err);
       }
       if (alias == null) {
         out.println("There is no alias specified. Please provide the" +
@@ -358,19 +383,20 @@ public class CredentialShell extends Configured implements Tool {
         if (value != null) {
           // testing only
           credential = value.toCharArray();
-        }
-        else {
-           credential = promptForCredential();
+        } else {
+          credential = promptForCredential();
         }
         provider.createCredentialEntry(alias, credential);
-        out.println(alias + " has been successfully created.");
         provider.flush();
+        out.println(alias + " has been successfully created.");
         printProviderWritten();
       } catch (InvalidParameterException e) {
-        out.println(alias + " has NOT been created. " + e.getMessage());
+        out.println("Credential " + alias + " has NOT been created. " +
+            e.getMessage());
         throw e;
       } catch (IOException e) {
-        out.println(alias + " has NOT been created. " + e.getMessage());
+        out.println("Credential " + alias + " has NOT been created. " +
+            e.getMessage());
         throw e;
       }
     }
@@ -391,16 +417,20 @@ public class CredentialShell extends Configured implements Tool {
 
     boolean noMatch;
     do {
-      char[] newPassword1 = c.readPassword("Enter password: ");
-      char[] newPassword2 = c.readPassword("Enter password again: ");
+      char[] newPassword1 = c.readPassword("Enter alias password: ");
+      char[] newPassword2 = c.readPassword("Enter alias password again: ");
       noMatch = !Arrays.equals(newPassword1, newPassword2);
       if (noMatch) {
-        if (newPassword1 != null) Arrays.fill(newPassword1, ' ');
+        if (newPassword1 != null) {
+          Arrays.fill(newPassword1, ' ');
+        }
         c.format("Passwords don't match. Try again.%n");
       } else {
         cred = newPassword1;
       }
-      if (newPassword2 != null) Arrays.fill(newPassword2, ' ');
+      if (newPassword2 != null) {
+        Arrays.fill(newPassword2, ' ');
+      }
     } while (noMatch);
     return cred;
   }
@@ -416,7 +446,7 @@ public class CredentialShell extends Configured implements Tool {
     passwordReader = reader;
   }
   
-  // to facilitate testing since Console is a final class...
+  /** To facilitate testing since Console is a final class. */
   public static class PasswordReader {
     public char[] readPassword(String prompt) {
       Console console = System.console();

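For illustration, a minimal sketch of driving the same CredentialProvider API that the CreateCommand above uses; the jceks path and the alias are hypothetical and error handling is trimmed.

```java
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;

public class CredentialCreateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical provider path; plays the role of the shell's -provider flag.
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        "jceks://file/tmp/test.jceks");

    List<CredentialProvider> providers =
        CredentialProviderFactory.getProviders(conf);
    CredentialProvider provider = providers.get(0);

    // Mirrors CreateCommand.execute(): store the entry, flush, then report.
    provider.createCredentialEntry("db.password", "secret".toCharArray());
    provider.flush();
    System.out.println("Credential db.password has been successfully created.");
  }
}
```

Note that the real CreateCommand additionally consults provider.needsPassword() and, when -strict is given, aborts with provider.noPasswordError(); those checks are omitted in this sketch.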
+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java

@@ -83,10 +83,10 @@ public class JavaKeyStoreProvider extends AbstractJavaKeyStoreProvider {
     permissions = s.getPermission();
   }
 
-  protected void initFileSystem(URI uri, Configuration conf)
+  protected void initFileSystem(URI uri)
       throws IOException {
-    super.initFileSystem(uri, conf);
-    fs = getPath().getFileSystem(conf);
+    super.initFileSystem(uri);
+    fs = getPath().getFileSystem(getConf());
   }
 
   /**

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/LocalJavaKeyStoreProvider.java

@@ -121,9 +121,9 @@ public final class LocalJavaKeyStoreProvider extends
   }
 
   @Override
-  protected void initFileSystem(URI uri, Configuration conf)
+  protected void initFileSystem(URI uri)
       throws IOException {
-    super.initFileSystem(uri, conf);
+    super.initFileSystem(uri);
     try {
       file = new File(new URI(getPath().toString()));
       if (LOG.isDebugEnabled()) {

+ 1 - 11
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.security.http;
 
 import java.io.IOException;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.regex.Matcher;
@@ -228,16 +227,7 @@ public class RestCsrfPreventionFilter implements Filter {
    */
   public static Map<String, String> getFilterParams(Configuration conf,
       String confPrefix) {
-    Map<String, String> filterConfigMap = new HashMap<>();
-    for (Map.Entry<String, String> entry : conf) {
-      String name = entry.getKey();
-      if (name.startsWith(confPrefix)) {
-        String value = conf.get(name);
-        name = name.substring(confPrefix.length());
-        filterConfigMap.put(name, value);
-      }
-    }
-    return filterConfigMap;
+    return conf.getPropsWithPrefix(confPrefix);
   }
 
   /**

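The simplification above leans on Configuration.getPropsWithPrefix, which returns every property starting with the given prefix, with the prefix stripped from the keys. A rough illustration (the prefix and property names below are made up for the example):

```java
import java.util.Map;

import org.apache.hadoop.conf.Configuration;

public class PrefixParamsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Illustrative filter settings sharing a common prefix.
    conf.set("hadoop.http.rest-csrf.custom-header", "X-XSRF-HEADER");
    conf.set("hadoop.http.rest-csrf.methods-to-ignore", "GET,OPTIONS,HEAD");

    Map<String, String> params =
        conf.getPropsWithPrefix("hadoop.http.rest-csrf.");
    for (Map.Entry<String, String> e : params.entrySet()) {
      // Prints custom-header=... and methods-to-ignore=... (prefix removed).
      System.out.println(e.getKey() + "=" + e.getValue());
    }
  }
}
```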
+ 167 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/XFrameOptionsFilter.java

@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.http;
+
+import java.io.IOException;
+import java.util.Map;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletResponse;
+import javax.servlet.http.HttpServletResponseWrapper;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * This filter protects webapps from clickjacking attacks that
+ * are possible through use of Frames to embed the resources in another
+ * application and intercept clicks to accomplish nefarious things.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class XFrameOptionsFilter implements Filter {
+  public static final String X_FRAME_OPTIONS = "X-Frame-Options";
+  public static final String CUSTOM_HEADER_PARAM = "xframe-options";
+
+  private String option = "DENY";
+
+  @Override
+  public void destroy() {
+  }
+
+  @Override
+  public void doFilter(ServletRequest req, ServletResponse res,
+      FilterChain chain) throws IOException, ServletException {
+    ((HttpServletResponse) res).setHeader(X_FRAME_OPTIONS, option);
+    chain.doFilter(req,
+        new XFrameOptionsResponseWrapper((HttpServletResponse) res));
+  }
+
+  @Override
+  public void init(FilterConfig config) throws ServletException {
+    String customOption = config.getInitParameter(CUSTOM_HEADER_PARAM);
+    if (customOption != null) {
+      option = customOption;
+    }
+  }
+
+  /**
+   * Constructs a mapping of configuration properties to be used for filter
+   * initialization.  The mapping includes all properties that start with the
+   * specified configuration prefix.  Property names in the mapping are trimmed
+   * to remove the configuration prefix.
+   *
+   * @param conf configuration to read
+   * @param confPrefix configuration prefix
+   * @return mapping of configuration properties to be used for filter
+   *     initialization
+   */
+  public static Map<String, String> getFilterParams(Configuration conf,
+      String confPrefix) {
+    return conf.getPropsWithPrefix(confPrefix);
+  }
+
+  /**
+   * This wrapper allows the rest of the filter pipeline to
+   * see the configured value when interrogating the response.
+   * It also blocks other filters from setting the value to
+   * anything other than what is configured.
+   *
+   */
+  public class XFrameOptionsResponseWrapper
+      extends HttpServletResponseWrapper {
+    /**
+     * Constructor that wraps the provided response.
+     * @param response the response to wrap
+     */
+    public XFrameOptionsResponseWrapper(HttpServletResponse response) {
+      super(response);
+    }
+
+    @Override
+    public void addHeader(String name, String value) {
+      // don't allow additional values to be added along
+      // with the configured options value
+      if (!name.equals(X_FRAME_OPTIONS)) {
+        super.addHeader(name, value);
+      }
+    }
+
+    @Override
+    public void setHeader(String name, String value) {
+      // don't allow overwriting of configured value
+      if (!name.equals(X_FRAME_OPTIONS)) {
+        super.setHeader(name, value);
+      }
+    }
+
+    @Override
+    public void setDateHeader(String name, long date) {
+      // don't allow overwriting of configured value
+      if (!name.equals(X_FRAME_OPTIONS)) {
+        super.setDateHeader(name, date);
+      }
+    }
+
+    @Override
+    public void addDateHeader(String name, long date) {
+      // don't allow additional values to be added along
+      // with the configured options value
+      if (!name.equals(X_FRAME_OPTIONS)) {
+        super.addDateHeader(name, date);
+      }
+    }
+
+    @Override
+    public void setIntHeader(String name, int value) {
+      // don't allow overwriting of configured value
+      if (!name.equals(X_FRAME_OPTIONS)) {
+        super.setIntHeader(name, value);
+      }
+    }
+
+    @Override
+    public void addIntHeader(String name, int value) {
+      // don't allow additional values to be added along
+      // with the configured options value
+      if (!name.equals(X_FRAME_OPTIONS)) {
+        super.addIntHeader(name, value);
+      }
+    }
+
+    @Override
+    public boolean containsHeader(String name) {
+      // allow the filter chain and subsequent
+      // filters to see that the header is set
+      if (name.equals(X_FRAME_OPTIONS)) {
+        return (option != null);
+      }
+      return super.containsHeader(name);
+    }
+  }
+}
+

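Outside of this patch, a sketch of how such a filter might be wired up with plain Servlet 3.0 APIs; the listener class and the SAMEORIGIN value are illustrative only, and Hadoop daemons would normally register the filter through their own HTTP server plumbing instead.

```java
import javax.servlet.FilterRegistration;
import javax.servlet.ServletContext;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import javax.servlet.annotation.WebListener;

import org.apache.hadoop.security.http.XFrameOptionsFilter;

@WebListener
public class XFrameOptionsSetup implements ServletContextListener {
  @Override
  public void contextInitialized(ServletContextEvent sce) {
    ServletContext ctx = sce.getServletContext();
    FilterRegistration.Dynamic reg =
        ctx.addFilter("xframeOptions", XFrameOptionsFilter.class);
    // CUSTOM_HEADER_PARAM ("xframe-options") selects the header value;
    // the filter falls back to DENY when the parameter is absent.
    reg.setInitParameter(XFrameOptionsFilter.CUSTOM_HEADER_PARAM, "SAMEORIGIN");
    reg.addMappingForUrlPatterns(null, false, "/*");
  }

  @Override
  public void contextDestroyed(ServletContextEvent sce) {
  }
}
```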
+ 22 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/package-info.java

@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+package org.apache.hadoop.security.http;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java

@@ -153,7 +153,7 @@ public class Token<T extends TokenIdentifier> implements Writable {
       cls = tokenKindMap.get(kind);
     }
     if (cls == null) {
-      LOG.warn("Cannot find class for token kind " + kind);
+      LOG.debug("Cannot find class for token kind " + kind);
       return null;
     }
     return cls;

+ 29 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java

@@ -232,10 +232,35 @@ extends TokenIdentifier {
   public String toString() {
     StringBuilder buffer = new StringBuilder();
     buffer
-        .append("owner=" + owner + ", renewer=" + renewer + ", realUser="
-            + realUser + ", issueDate=" + issueDate + ", maxDate=" + maxDate
-            + ", sequenceNumber=" + sequenceNumber + ", masterKeyId="
-            + masterKeyId);
+        .append(getKind())
+        .append(" owner=").append(owner)
+        .append(", renewer=").append(renewer)
+        .append(", realUser=").append(realUser)
+        .append(", issueDate=").append(issueDate)
+        .append(", maxDate=").append(maxDate)
+        .append(", sequenceNumber=").append(sequenceNumber)
+        .append(", masterKeyId=").append(masterKeyId);
+    return buffer.toString();
+  }
+  /*
+   * A frozen version of toString() kept for backward compatibility.
+   * When backward compatibility is not needed, use toString(), which provides
+   * more info and is expected to evolve; see HDFS-9732.
+   * Don't change this method except for major revisions.
+   *
+   * NOTE:
+   * Currently this method is used by CLI for backward compatibility.
+   */
+  public String toStringStable() {
+    StringBuilder buffer = new StringBuilder();
+    buffer
+        .append("owner=").append(owner)
+        .append(", renewer=").append(renewer)
+        .append(", realUser=").append(realUser)
+        .append(", issueDate=").append(issueDate)
+        .append(", maxDate=").append(maxDate)
+        .append(", sequenceNumber=").append(sequenceNumber)
+        .append(", masterKeyId=").append(masterKeyId);
     return buffer.toString();
   }
 }

+ 5 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java

@@ -216,8 +216,11 @@ public class DelegationTokenAuthenticationFilter
 
   @VisibleForTesting
   static String getDoAs(HttpServletRequest request) {
-    List<NameValuePair> list = URLEncodedUtils.parse(request.getQueryString(),
-        UTF8_CHARSET);
+    String queryString = request.getQueryString();
+    if (queryString == null) {
+      return null;
+    }
+    List<NameValuePair> list = URLEncodedUtils.parse(queryString, UTF8_CHARSET);
     if (list != null) {
       for (NameValuePair nv : list) {
         if (DelegationTokenAuthenticatedURL.DO_AS.

+ 5 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/ServletUtils.java

@@ -45,8 +45,11 @@ class ServletUtils {
    */
   public static String getParameter(HttpServletRequest request, String name)
       throws IOException {
-    List<NameValuePair> list = URLEncodedUtils.parse(request.getQueryString(),
-        UTF8_CHARSET);
+    String queryString = request.getQueryString();
+    if (queryString == null) {
+      return null;
+    }
+    List<NameValuePair> list = URLEncodedUtils.parse(queryString, UTF8_CHARSET);
     if (list != null) {
       for (NameValuePair nv : list) {
         if (name.equals(nv.getName())) {

+ 31 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java

@@ -53,7 +53,7 @@ public class SysInfoLinux extends SysInfo {
    */
   private static final String PROCFS_MEMFILE = "/proc/meminfo";
   private static final Pattern PROCFS_MEMFILE_FORMAT =
-      Pattern.compile("^([a-zA-Z]*):[ \t]*([0-9]*)[ \t]kB");
+      Pattern.compile("^([a-zA-Z_()]*):[ \t]*([0-9]*)[ \t]*(kB)?");
 
   // We need the values for the following keys in meminfo
   private static final String MEMTOTAL_STRING = "MemTotal";
@@ -61,6 +61,12 @@ public class SysInfoLinux extends SysInfo {
   private static final String MEMFREE_STRING = "MemFree";
   private static final String SWAPFREE_STRING = "SwapFree";
   private static final String INACTIVE_STRING = "Inactive";
+  private static final String INACTIVEFILE_STRING = "Inactive(file)";
+  private static final String HARDWARECORRUPTED_STRING = "HardwareCorrupted";
+  private static final String HUGEPAGESTOTAL_STRING = "HugePages_Total";
+  private static final String HUGEPAGESIZE_STRING = "Hugepagesize";
+
+
 
   /**
    * Patterns for parsing /proc/cpuinfo.
@@ -122,7 +128,13 @@ public class SysInfoLinux extends SysInfo {
   private long swapSize = 0;
   private long ramSizeFree = 0;  // free ram space on the machine (kB)
   private long swapSizeFree = 0; // free swap space on the machine (kB)
-  private long inactiveSize = 0; // inactive cache memory (kB)
+  private long inactiveSize = 0; // inactive memory (kB)
+  private long inactiveFileSize = -1; // inactive cache memory, -1 if absent
+  private long hardwareCorruptSize = 0; // corrupted memory, unavailable (kB)
+  private long hugePagesTotal = 0; // # of hugepages reserved
+  private long hugePageSize = 0; // size of each hugepage (kB)
+
+
   /* number of logical processors on the system. */
   private int numProcessors = 0;
   /* number of physical cores on the system. */
@@ -245,6 +257,14 @@ public class SysInfoLinux extends SysInfo {
             swapSizeFree = Long.parseLong(mat.group(2));
           } else if (mat.group(1).equals(INACTIVE_STRING)) {
             inactiveSize = Long.parseLong(mat.group(2));
+          } else if (mat.group(1).equals(INACTIVEFILE_STRING)) {
+            inactiveFileSize = Long.parseLong(mat.group(2));
+          } else if (mat.group(1).equals(HARDWARECORRUPTED_STRING)) {
+            hardwareCorruptSize = Long.parseLong(mat.group(2));
+          } else if (mat.group(1).equals(HUGEPAGESTOTAL_STRING)) {
+            hugePagesTotal = Long.parseLong(mat.group(2));
+          } else if (mat.group(1).equals(HUGEPAGESIZE_STRING)) {
+            hugePageSize = Long.parseLong(mat.group(2));
           }
         }
         str = in.readLine();
@@ -554,28 +574,31 @@ public class SysInfoLinux extends SysInfo {
   @Override
   public long getPhysicalMemorySize() {
     readProcMemInfoFile();
-    return ramSize * 1024;
+    return (ramSize
+            - hardwareCorruptSize
+            - (hugePagesTotal * hugePageSize)) * 1024;
   }
 
   /** {@inheritDoc} */
   @Override
   public long getVirtualMemorySize() {
-    readProcMemInfoFile();
-    return (ramSize + swapSize) * 1024;
+    return getPhysicalMemorySize() + (swapSize * 1024);
   }
 
   /** {@inheritDoc} */
   @Override
   public long getAvailablePhysicalMemorySize() {
     readProcMemInfoFile(true);
-    return (ramSizeFree + inactiveSize) * 1024;
+    long inactive = inactiveFileSize != -1
+        ? inactiveFileSize
+        : inactiveSize;
+    return (ramSizeFree + inactive) * 1024;
   }
 
   /** {@inheritDoc} */
   @Override
   public long getAvailableVirtualMemorySize() {
-    readProcMemInfoFile(true);
-    return (ramSizeFree + swapSizeFree + inactiveSize) * 1024;
+    return getAvailablePhysicalMemorySize() + (swapSizeFree * 1024);
   }
 
   /** {@inheritDoc} */

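To make the revised accounting concrete, a small worked sketch using hypothetical /proc/meminfo values; it applies the relaxed pattern above and the same arithmetic as getPhysicalMemorySize() and getAvailablePhysicalMemorySize().

```java
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MemInfoSketch {
  // Same shape as the relaxed PROCFS_MEMFILE_FORMAT above: keys may contain
  // '_', '(' and ')', and the trailing "kB" unit is optional.
  private static final Pattern MEMINFO =
      Pattern.compile("^([a-zA-Z_()]*):[ \t]*([0-9]*)[ \t]*(kB)?");

  public static void main(String[] args) {
    // Hypothetical /proc/meminfo excerpt (kB values, except HugePages_Total).
    String[] lines = {
        "MemTotal:       16384000 kB",
        "MemFree:         1024000 kB",
        "Inactive:        4096000 kB",
        "Inactive(file):  3072000 kB",
        "HardwareCorrupted:     0 kB",
        "HugePages_Total:    1024",
        "Hugepagesize:       2048 kB",
    };
    Map<String, Long> mem = new HashMap<>();
    for (String line : lines) {
      Matcher mat = MEMINFO.matcher(line);
      if (mat.find()) {
        mem.put(mat.group(1), Long.parseLong(mat.group(2)));
      }
    }
    // getPhysicalMemorySize(): total minus corrupted RAM and the hugepage pool.
    long physical = (mem.get("MemTotal")
        - mem.get("HardwareCorrupted")
        - mem.get("HugePages_Total") * mem.get("Hugepagesize")) * 1024;
    // getAvailablePhysicalMemorySize(): prefer Inactive(file) when present.
    long available = (mem.get("MemFree") + mem.get("Inactive(file)")) * 1024;
    System.out.println("physical=" + physical + " available=" + available);
  }
}
```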
+ 65 - 3
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -260,6 +260,18 @@
   </description>
 </property>
 
+<property>
+    <name>hadoop.security.group.mapping.ldap.search.attr.memberof</name>
+    <value></value>
+    <description>
+      The attribute of the user object that identifies its group objects. By
+      default, Hadoop makes two LDAP queries per user if this value is empty. If
+      set, Hadoop will attempt to resolve group names from this attribute,
+      instead of making the second LDAP query to get group objects. The value
+      should be 'memberOf' for an MS AD installation.
+    </description>
+</property>
+
 <property>
   <name>hadoop.security.group.mapping.ldap.search.attr.member</name>
   <value>member</value>
@@ -656,7 +668,7 @@
 <property>
   <name>fs.s3.buffer.dir</name>
   <value>${hadoop.tmp.dir}/s3</value>
-  <description>Determines where on the local filesystem the S3 filesystem
+  <description>Determines where on the local filesystem the s3:/s3n: filesystem
   should store files before sending them to S3
   (or after retrieving them from S3).
   </description>
@@ -739,7 +751,7 @@
   <name>fs.s3n.server-side-encryption-algorithm</name>
   <value></value>
   <description>Specify a server-side encryption algorithm for S3.
-  The default is NULL, and the only other currently allowable value is AES256.
+  Unset by default; the only currently allowable value is AES256.
   </description>
 </property>
 
@@ -902,12 +914,26 @@
   <description>Minimum age in seconds of multipart uploads to purge</description>
 </property>
 
+<property>
+  <name>fs.s3a.server-side-encryption-algorithm</name>
+  <description>Specify a server-side encryption algorithm for s3a: file system.
+    Unset by default; the only currently allowable value is AES256.
+  </description>
+</property>
+
 <property>
   <name>fs.s3a.signing-algorithm</name>
   <description>Override the default signing algorithm so legacy
     implementations can still be used</description>
 </property>
 
+<property>
+  <name>fs.s3a.block.size</name>
+  <value>33554432</value>
+  <description>Block size to use when reading files using s3a: file system.
+  </description>
+</property>
+
 <property>
   <name>fs.s3a.buffer.dir</name>
   <value>${hadoop.tmp.dir}/s3a</value>
@@ -924,13 +950,36 @@
     uploading (fs.s3a.threads.max) or queueing (fs.s3a.max.total.tasks)</description>
 </property>
 
-  <property>
+<property>
+  <name>fs.s3a.readahead.range</name>
+  <value>65536</value>
+  <description>Bytes to read ahead during a seek() before closing and
+  re-opening the S3 HTTP connection. This option will be overridden if
+  any call to setReadahead() is made to an open stream.</description>
+</property>
+
+<property>
   <name>fs.s3a.fast.buffer.size</name>
   <value>1048576</value>
   <description>Size of initial memory buffer in bytes allocated for an
     upload. No effect if fs.s3a.fast.upload is false.</description>
 </property>
 
+<property>
+  <name>fs.s3a.user.agent.prefix</name>
+  <value></value>
+  <description>
+    Sets a custom value that will be prepended to the User-Agent header sent in
+    HTTP requests to the S3 back-end by S3AFileSystem.  The User-Agent header
+    always includes the Hadoop version number followed by a string generated by
+    the AWS SDK.  An example is "User-Agent: Hadoop 2.8.0, aws-sdk-java/1.10.6".
+    If this optional property is set, then its value is prepended to create a
+    customized User-Agent.  For example, if this configuration property was set
+    to "MyApp", then an example of the resulting User-Agent would be
+    "User-Agent: MyApp, Hadoop 2.8.0, aws-sdk-java/1.10.6".
+  </description>
+</property>
+
 <property>
   <name>fs.s3a.impl</name>
   <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
@@ -2106,4 +2155,17 @@
     <name>hadoop.http.logs.enabled</name>
     <value>true</value>
   </property>
+
+  <property>
+    <name>fs.client.resolve.topology.enabled</name>
+    <value>false</value>
+    <description>Whether the client machine will use the class specified by
+      property net.topology.node.switch.mapping.impl to compute the network
+      distance between itself and remote machines of the FileSystem. Additional
+      properties might need to be configured depending on the class specified
+      in net.topology.node.switch.mapping.impl. For example, if
+      org.apache.hadoop.net.ScriptBasedMapping is used, a valid script file
+      needs to be specified in net.topology.script.file.name.
+    </description>
+  </property>
 </configuration>

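The new keys can be read back like any other configuration entry. A small sketch, assuming the defaults documented above; the property names come from the XML, while the fallback values here simply restate those documented defaults rather than Hadoop constants.

```java
import org.apache.hadoop.conf.Configuration;

public class S3aTuningSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // 64 KiB readahead and a 32 MiB block size, per the defaults above.
    long readahead = conf.getLong("fs.s3a.readahead.range", 65536);
    long blockSize = conf.getLong("fs.s3a.block.size", 33554432);
    String uaPrefix = conf.getTrimmed("fs.s3a.user.agent.prefix", "");
    boolean resolveTopology =
        conf.getBoolean("fs.client.resolve.topology.enabled", false);

    System.out.println("readahead=" + readahead
        + " blockSize=" + blockSize
        + " userAgentPrefix=" + uaPrefix
        + " resolveTopology=" + resolveTopology);
  }
}
```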
+ 15 - 7
hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md

@@ -104,9 +104,9 @@ Usage: `hadoop credential <subcommand> [options]`
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
-| create *alias* [-provider *provider-path*] | Prompts the user for a credential to be stored as the given alias. The *hadoop.security.credential.provider.path* within the core-site.xml file will be used unless a `-provider` is indicated. |
-| delete *alias* [-provider *provider-path*] [-f] | Deletes the credential with the provided alias. The *hadoop.security.credential.provider.path* within the core-site.xml file will be used unless a `-provider` is indicated. The command asks for confirmation unless `-f` is specified |
-| list [-provider *provider-path*] | Lists all of the credential aliases The *hadoop.security.credential.provider.path* within the core-site.xml file will be used unless a `-provider` is indicated. |
+| create *alias* [-provider *provider-path*] [-strict] [-value *credential-value*] | Prompts the user for a credential to be stored as the given alias. The *hadoop.security.credential.provider.path* within the core-site.xml file will be used unless a `-provider` is indicated. The `-strict` flag will cause the command to fail if the provider uses a default password. Use the `-value` flag to supply the credential value (a.k.a. the alias password) instead of being prompted. |
+| delete *alias* [-provider *provider-path*] [-strict] [-f] | Deletes the credential with the provided alias. The *hadoop.security.credential.provider.path* within the core-site.xml file will be used unless a `-provider` is indicated. The `-strict` flag will cause the command to fail if the provider uses a default password. The command asks for confirmation unless `-f` is specified. |
+| list [-provider *provider-path*] [-strict] | Lists all of the credential aliases. The *hadoop.security.credential.provider.path* within the core-site.xml file will be used unless a `-provider` is indicated. The `-strict` flag will cause the command to fail if the provider uses a default password. |
 
 Command to manage credentials, passwords and secrets within credential providers.
 
@@ -116,6 +116,8 @@ indicates that the current user's credentials file should be consulted through t
 
 When utilizing the credential command it will often be for provisioning a password or secret to a particular credential store provider. In order to explicitly indicate which provider store to use the `-provider` option should be used. Otherwise, given a path of multiple providers, the first non-transient provider will be used. This may or may not be the one that you intended.
 
+Providers frequently require that a password or other secret is supplied. If the provider requires a password and is unable to find one, it will use a default password and emit a warning message that the default password is being used. If the `-strict` flag is supplied, the warning message becomes an error message and the command returns immediately with an error status.
+
 Example: `hadoop credential list -provider jceks://file/tmp/test.jceks`
 
 ### `distch`
@@ -157,6 +159,10 @@ For every subcommand that connects to a service, convenience flags are provided
 
 This command is documented in the [File System Shell Guide](./FileSystemShell.html). It is a synonym for `hdfs dfs` when HDFS is in use.
 
+### `gridmix`
+
+Gridmix is a benchmark tool for Hadoop clusters. More information can be found in the [Gridmix Guide](../../hadoop-gridmix/GridMix.html).
+
 ### `jar`
 
 Usage: `hadoop jar <jar> [mainClass] args...`
@@ -186,14 +192,16 @@ Usage: `hadoop key <subcommand> [options]`
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
-| create *keyname* [-cipher *cipher*] [-size *size*] [-description *description*] [-attr *attribute=value*] [-provider *provider*] [-help] | Creates a new key for the name specified by the *keyname* argument within the provider specified by the `-provider` argument. You may specify a cipher with the `-cipher` argument. The default cipher is currently "AES/CTR/NoPadding". The default keysize is 128. You may specify the requested key length using the `-size` argument. Arbitrary attribute=value style attributes may be specified using the `-attr` argument. `-attr` may be specified multiple times, once per attribute. |
-| roll *keyname* [-provider *provider*] [-help] | Creates a new version for the specified key within the provider indicated using the `-provider` argument |
-| delete *keyname* [-provider *provider*] [-f] [-help] | Deletes all versions of the key specified by the *keyname* argument from within the provider specified by `-provider`. The command asks for user confirmation unless `-f` is specified. |
-| list [-provider *provider*] [-metadata] [-help] | Displays the keynames contained within a particular provider as configured in core-site.xml or specified with the `-provider` argument. `-metadata` displays the metadata. |
+| create *keyname* [-cipher *cipher*] [-size *size*] [-description *description*] [-attr *attribute=value*] [-provider *provider*] [-strict] [-help] | Creates a new key for the name specified by the *keyname* argument within the provider specified by the `-provider` argument. The `-strict` flag will cause the command to fail if the provider uses a default password. You may specify a cipher with the `-cipher` argument. The default cipher is currently "AES/CTR/NoPadding". The default keysize is 128. You may specify the requested key length using the `-size` argument. Arbitrary attribute=value style attributes may be specified using the `-attr` argument. `-attr` may be specified multiple times, once per attribute. |
+| roll *keyname* [-provider *provider*] [-strict] [-help] | Creates a new version for the specified key within the provider indicated using the `-provider` argument. The `-strict` flag will cause the command to fail if the provider uses a default password. |
+| delete *keyname* [-provider *provider*] [-strict] [-f] [-help] | Deletes all versions of the key specified by the *keyname* argument from within the provider specified by `-provider`. The `-strict` flag will cause the command to fail if the provider uses a default password. The command asks for user confirmation unless `-f` is specified. |
+| list [-provider *provider*] [-strict] [-metadata] [-help] | Displays the keynames contained within a particular provider as configured in core-site.xml or specified with the `-provider` argument. The `-strict` flag will cause the command to fail if the provider uses a default password. `-metadata` displays the metadata. |
 | -help | Prints usage of this command |
 
 Manage keys via the KeyProvider. For details on KeyProviders, see the [Transparent Encryption Guide](../hadoop-hdfs/TransparentEncryption.html).
 
+Providers frequently require that a password or other secret is supplied. If the provider requires a password and is unable to find one, it will use a default password and emit a warning message that the default password is being used. If the `-strict` flag is supplied, the warning message becomes an error message and the command returns immediately with an error status.
+
 NOTE: Some KeyProviders (e.g. org.apache.hadoop.crypto.key.JavaKeyStoreProvider) do not support uppercase key names.
 
 ### `trace`

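The new `-strict` behavior can also be exercised programmatically, since the key and credential shells are Tool implementations. A sketch along the lines of the TestKeyShell additions later in this commit; the provider path is hypothetical.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyShell;
import org.apache.hadoop.util.ToolRunner;

public class StrictKeyCreateSketch {
  public static void main(String[] args) throws Exception {
    // With -strict, a provider that would otherwise fall back to its default
    // password fails instead of printing a warning, so the exit code is
    // non-zero.
    String[] shellArgs = {
        "create", "mykey",
        "-provider", "jceks://file/tmp/test.jceks",
        "-strict"
    };
    int rc = ToolRunner.run(new Configuration(), new KeyShell(), shellArgs);
    System.out.println("hadoop key create exited with " + rc);
  }
}
```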
+ 5 - 0
hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md

@@ -27,6 +27,11 @@ Most of the commands in FS shell behave like corresponding Unix commands. Differ
 
 If HDFS is being used, `hdfs dfs` is a synonym.
 
+Relative paths can be used. For HDFS, the current working directory is the
+HDFS home directory `/user/<username>`, which often has to be created manually.
+The HDFS home directory can also be accessed implicitly, e.g., when using the
+HDFS trash folder: the `.Trash` directory in the home directory.
+
 See the [Commands Manual](./CommandsManual.html) for generic shell options.
 
 appendToFile

+ 6 - 0
hadoop-common-project/hadoop-common/src/site/markdown/GroupsMapping.md

@@ -98,6 +98,12 @@ To secure the connection, the implementation supports LDAP over SSL (LDAPS). SSL
 In addition, specify the path to the keystore file for SSL connection in `hadoop.security.group.mapping.ldap.ssl.keystore` and keystore password in `hadoop.security.group.mapping.ldap.ssl.keystore.password`.
 Alternatively, store the keystore password in a file, and point `hadoop.security.group.mapping.ldap.ssl.keystore.password.file` to that file. For security purposes, this file should be readable only by the Unix user running the daemons.
 
+### Low latency group mapping resolution ###
+Typically, Hadoop resolves a user's group names by making two LDAP queries: the first query gets the user object, and the second query uses the user's Distinguished Name to find the groups.
+For some LDAP servers, such as Active Directory, the user object returned in the first query also contains the DN of the user's groups in its `memberOf` attribute, and the name of a group is its Relative Distinguished Name.
+Therefore, it is possible to infer the user's groups from the first query without sending the second one, which may reduce the group name resolution latency incurred by the second query. If Hadoop fails to resolve group names from this attribute, it falls back to the typical two-query scenario and sends the second query.
+To enable this feature, set `hadoop.security.group.mapping.ldap.search.attr.memberof` to `memberOf`, and Hadoop will resolve group names using this attribute in the user object.
+
 Composite Groups Mapping
 --------
 `CompositeGroupsMapping` works by enumerating a list of service providers in `hadoop.security.group.mapping.providers`.

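A minimal sketch of enabling the single-query path from code rather than core-site.xml; it only sets the property introduced above before the groups service is constructed. The user name is hypothetical, and the usual LDAP URL/bind settings are omitted for brevity.

```java
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Groups;

public class MemberOfLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Use the LDAP provider and resolve groups from the user object itself,
    // as described above for Active Directory style servers.
    conf.set("hadoop.security.group.mapping",
        "org.apache.hadoop.security.LdapGroupsMapping");
    conf.set("hadoop.security.group.mapping.ldap.search.attr.memberof",
        "memberOf");
    // hadoop.security.group.mapping.ldap.url, bind user, etc. omitted here.

    Groups groups = Groups.getUserToGroupsMappingService(conf);
    List<String> userGroups = groups.getGroups("alice"); // hypothetical user
    System.out.println(userGroups);
  }
}
```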
+ 6 - 0
hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md

@@ -476,6 +476,7 @@ KDiag: Diagnose Kerberos Problems
   [--out <file>] : Write output to a file.
   [--resource <resource>] : Load an XML configuration resource.
   [--secure] : Require the hadoop configuration to be secure.
+  [--verifyshortname <principal>]: Verify the short name of the specified principal does not contain '@' or '/'
 ```
 
 #### `--jaas`: Require a JAAS file to be defined in `java.security.auth.login.config`.
@@ -574,6 +575,11 @@ or implicitly set to "simple":
 
 Needless to say, an application so configured cannot talk to a secure Hadoop cluster.
 
+#### `--verifyshortname <principal>`: validate the short name of a principal
+
+This verifies that the short name of a principal contains neither the `"@"`
+nor `"/"` characters.
+
 ### Example
 
 ```

+ 83 - 4
hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md

@@ -85,11 +85,11 @@ Apache Hadoop allows for third parties to easily add new features through a vari
 
 Core to this functionality is the concept of a shell profile.  Shell profiles are shell snippets that can do things such as add jars to the classpath, configure Java system properties and more.
 
-Shell profiles may be installed in either `${HADOOP_CONF_DIR}/shellprofile.d` or `${HADOOP_HOME}/libexec/shellprofile.d`.  Shell profiles in the `libexec` directory are part of the base installation and cannot be overriden by the user.  Shell profiles in the configuration directory may be ignored if the end user changes the configuration directory at runtime.
+Shell profiles may be installed in either `${HADOOP_CONF_DIR}/shellprofile.d` or `${HADOOP_HOME}/libexec/shellprofile.d`.  Shell profiles in the `libexec` directory are part of the base installation and cannot be overridden by the user.  Shell profiles in the configuration directory may be ignored if the end user changes the configuration directory at runtime.
 
 An example of a shell profile is in the libexec directory.
 
-## Shell API
+### Shell API
 
 Apache Hadoop's shell code has a [function library](./UnixShellAPI.html) that is open for administrators and developers to use to assist in their configuration and advanced feature management.  These APIs follow the standard [Apache Hadoop Interface Classification](./InterfaceClassification.html), with one addition: Replaceable.
 
@@ -97,10 +97,8 @@ The shell code allows for core functions to be overridden. However, not all func
 
 In order to replace a function, create a file called `hadoop-user-functions.sh` in the `${HADOOP_CONF_DIR}` directory.  Simply define the new, replacement function in this file and the system will pick it up automatically.  There may be as many replacement functions as needed in this file.  Examples of function replacement are in the `hadoop-user-functions.sh.examples` file.
 
-
 Functions that are marked Public and Stable are safe to use in shell profiles as-is.  Other functions may change in a minor release.
 
-
 ### User-level API Access
 
 In addition to `.hadoop-env`, which allows individual users to override `hadoop-env.sh`, user's may also use `.hadooprc`.  This is called after the Apache Hadoop shell environment has been configured and allows the full set of shell API function calls.
@@ -112,3 +110,84 @@ hadoop_add_classpath /some/path/custom.jar
 ```
 
 would go into `.hadooprc`
+
+### Dynamic Subcommands
+
+Utilizing the Shell API, it is possible for third parties to add their own subcommands to the primary Hadoop shell scripts (hadoop, hdfs, mapred, yarn).
+
+Prior to executing a subcommand, the primary scripts will check for the existence of a (scriptname)\_subcommand\_(subcommand) function.  This function gets executed with the parameters set to all remaining command line arguments.  For example, if the following function is defined:
+
+```bash
+function yarn_subcommand_hello
+{
+  echo "$@"
+  exit $?
+}
+```
+
+then executing `yarn --debug hello world I see you` will activate script debugging and call the `yarn_subcommand_hello` function as:
+
+```bash
+yarn_subcommand_hello world I see you
+```
+
+which will result in the output of:
+
+```bash
+world I see you
+```
+
+It is also possible to add the new subcommands to the usage output. The `hadoop_add_subcommand` function adds text to the usage output.  Utilizing the standard HADOOP_SHELL_EXECNAME variable, we can limit which command gets our new function.
+
+```bash
+if [[ "${HADOOP_SHELL_EXECNAME}" = "yarn" ]]; then
+  hadoop_add_subcommand "hello" "Print some text to the screen"
+fi
+```
+
+This functionality may also be used to override the built-ins.  For example, defining:
+
+```bash
+function hdfs_subcommand_fetchdt
+{
+  ...
+}
+```
+
+... will replace the existing `hdfs fetchdt` subcommand with a custom one.
+
+Some key environment variables related to Dynamic Subcommands:
+
+* HADOOP\_CLASSNAME
+
+This is the name of the Java class to use when program execution continues.
+
+* HADOOP\_SHELL\_EXECNAME
+
+This is the name of the script that is being executed.  It will be one of hadoop, hdfs, mapred, or yarn.
+
+* HADOOP\_SUBCMD
+
+This is the subcommand that was passed on the command line.
+
+* HADOOP\_SUBCMD\_ARGS
+
+This array contains the argument list after the Apache Hadoop common argument processing has taken place and is the same list that is passed to the subcommand function as arguments.  For example, if `hadoop --debug subcmd 1 2 3` has been executed on the command line, then `${HADOOP_SUBCMD_ARGS[0]}` will be 1 and `hadoop_subcommand_subcmd` will also have $1 equal to 1.  This array list MAY be modified by subcommand functions to add or delete values from the argument list for further processing.
+
+* HADOOP\_SUBCMD\_SECURESERVICE
+
+If this command should/will be executed as a secure daemon, set this to true.
+
+* HADOOP\_SUBCMD\_SECUREUSER
+
+If this command should/will be executed as a secure daemon, set the user name to be used.
+
+* HADOOP\_SUBCMD\_SUPPORTDAEMONIZATION
+
+If this command can be executed as a daemon, set this to true.
+
+* HADOOP\_USER\_PARAMS
+
+This is the full content of the command line, prior to any parsing being done. It will contain flags such as `--debug`.  It MAY NOT be manipulated.
+
+The Apache Hadoop runtime facilities require that functions exit if no further processing is required.  For example, in the hello example above, Java and other facilities were not required, so a simple `exit $?` was sufficient.  However, if the function were to utilize `HADOOP_CLASSNAME`, then program execution must continue so that Java with the Apache Hadoop-specific parameters will be launched against the given Java class.  Another example would be the case of an unrecoverable error.  It is the function's responsibility to print an appropriate message (preferably using the hadoop_error API call) and exit appropriately.

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java

@@ -267,7 +267,7 @@ public class TestKeyProviderFactory {
     Path path = ProviderUtils.unnestUri(new URI(ourUrl));
     FileSystem fs = path.getFileSystem(conf);
     FileStatus s = fs.getFileStatus(path);
-    assertTrue(s.getPermission().toString().equals("rwx------"));
+    assertTrue(s.getPermission().toString().equals("rw-------"));
     assertTrue(file + " should exist", file.isFile());
 
     // Corrupt file and Check if JKS can reload from _OLD file

+ 27 - 6
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java

@@ -25,6 +25,7 @@ import java.util.UUID;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -115,6 +116,12 @@ public class TestKeyShell {
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains(keyName + " has been " +
             "successfully created"));
+    assertTrue(outContent.toString()
+        .contains(ProviderUtils.NO_PASSWORD_WARN));
+    assertTrue(outContent.toString()
+        .contains(ProviderUtils.NO_PASSWORD_INSTRUCTIONS_DOC));
+    assertTrue(outContent.toString()
+        .contains(ProviderUtils.NO_PASSWORD_CONT));
 
     String listOut = listKeys(ks, false);
     assertTrue(listOut.contains(keyName));
@@ -129,7 +136,7 @@ public class TestKeyShell {
     rc = ks.run(args2);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains("key1 has been successfully " +
-    		"rolled."));
+        "rolled."));
 
     deleteKey(ks, keyName);
 
@@ -192,8 +199,7 @@ public class TestKeyShell {
     ks.setConf(new Configuration());
     rc = ks.run(args1);
     assertEquals(1, rc);
-    assertTrue(outContent.toString().contains("There are no valid " +
-    		"KeyProviders configured."));
+    assertTrue(outContent.toString().contains(KeyShell.NO_VALID_PROVIDERS));
   }
 
   @Test
@@ -207,7 +213,7 @@ public class TestKeyShell {
     rc = ks.run(args1);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains("WARNING: you are modifying a " +
-    		"transient provider."));
+        "transient provider."));
   }
   
   @Test
@@ -221,8 +227,23 @@ public class TestKeyShell {
     ks.setConf(config);
     rc = ks.run(args1);
     assertEquals(1, rc);
-    assertTrue(outContent.toString().contains("There are no valid " +
-    		"KeyProviders configured."));
+    assertTrue(outContent.toString().contains(KeyShell.NO_VALID_PROVIDERS));
+  }
+
+  @Test
+  public void testStrict() throws Exception {
+    outContent.reset();
+    int rc = 0;
+    KeyShell ks = new KeyShell();
+    ks.setConf(new Configuration());
+    final String[] args1 = {"create", "hello", "-provider", jceksProvider,
+        "-strict"};
+    rc = ks.run(args1);
+    assertEquals(1, rc);
+    assertTrue(outContent.toString()
+        .contains(ProviderUtils.NO_PASSWORD_ERROR));
+    assertTrue(outContent.toString()
+        .contains(ProviderUtils.NO_PASSWORD_INSTRUCTIONS_DOC));
   }
 
   @Test

+ 1 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java

@@ -132,6 +132,7 @@ public class TestFilterFileSystem {
     public Path fixRelativePart(Path p);
     public ContentSummary getContentSummary(Path f);
     public QuotaUsage getQuotaUsage(Path f);
+    StorageStatistics getStorageStatistics();
   }
 
   @Test

+ 1 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java

@@ -221,6 +221,7 @@ public class TestHarFileSystem {
     public Path getTrashRoot(Path path) throws IOException;
 
     public Collection<FileStatus> getTrashRoots(boolean allUsers) throws IOException;
+    StorageStatistics getStorageStatistics();
   }
 
   @Test

+ 2 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractBondedFSContract.java

@@ -73,7 +73,8 @@ public abstract class AbstractBondedFSContract extends AbstractFSContract {
       } catch (URISyntaxException e) {
         throw new IOException("Invalid URI " + fsName);
       } catch (IllegalArgumentException e) {
-        throw new IOException("Invalid URI " + fsName, e);
+        throw new IOException("Unable to initialize filesystem " + fsName
+            + ": " + e, e);
       }
     } else {
       LOG.info("skipping tests as FS name is not defined in "

+ 67 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFileAppend.java

@@ -248,6 +248,73 @@ public class TestSequenceFileAppend {
     fs.deleteOnExit(file);
   }
 
+  @Test(timeout = 30000)
+  public void testAppendNoneCompression() throws Exception {
+    Path file = new Path(ROOT_PATH, "testseqappendnonecompr.seq");
+    fs.delete(file, true);
+
+    Option compressOption = Writer.compression(CompressionType.NONE);
+    Writer writer =
+        SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
+            SequenceFile.Writer.keyClass(Long.class),
+            SequenceFile.Writer.valueClass(String.class), compressOption);
+
+    writer.append(1L, "one");
+    writer.append(2L, "two");
+    writer.close();
+
+    verify2Values(file);
+
+    writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
+        SequenceFile.Writer.keyClass(Long.class),
+        SequenceFile.Writer.valueClass(String.class),
+        SequenceFile.Writer.appendIfExists(true), compressOption);
+
+    writer.append(3L, "three");
+    writer.append(4L, "four");
+    writer.close();
+
+    verifyAll4Values(file);
+
+    // Verify failure if the compression details are different or not Provided
+    try {
+      writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
+          SequenceFile.Writer.keyClass(Long.class),
+          SequenceFile.Writer.valueClass(String.class),
+          SequenceFile.Writer.appendIfExists(true));
+      writer.close();
+      fail("Expected IllegalArgumentException for compression options");
+    } catch (IllegalArgumentException iae) {
+      // Expected exception. Ignore it
+    }
+
+    // Verify failure if the compression details are different
+    try {
+      Option wrongCompressOption =
+          Writer.compression(CompressionType.RECORD, new GzipCodec());
+
+      writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
+          SequenceFile.Writer.keyClass(Long.class),
+          SequenceFile.Writer.valueClass(String.class),
+          SequenceFile.Writer.appendIfExists(true), wrongCompressOption);
+      writer.close();
+      fail("Expected IllegalArgumentException for compression options");
+    } catch (IllegalArgumentException iae) {
+      // Expected exception. Ignore it
+    }
+
+    // Codec should be ignored
+    Option noneWithCodec =
+        Writer.compression(CompressionType.NONE, new DefaultCodec());
+
+    writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
+        SequenceFile.Writer.keyClass(Long.class),
+        SequenceFile.Writer.valueClass(String.class),
+        SequenceFile.Writer.appendIfExists(true), noneWithCodec);
+    writer.close();
+    fs.deleteOnExit(file);
+  }
+
   @Test(timeout = 30000)
   public void testAppendSort() throws Exception {
     GenericTestUtils.assumeInNativeProfile();

+ 113 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/FakeCompressor.java

@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.compress;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * A fake compressor.
+ * Its input and output are the same.
+ */
+class FakeCompressor implements Compressor {
+
+  private boolean finish;
+  private boolean finished;
+  private int nread;
+  private int nwrite;
+
+  private byte[] userBuf;
+  private int userBufOff;
+  private int userBufLen;
+
+  @Override
+  public int compress(byte[] b, int off, int len) throws IOException {
+    int n = Math.min(len, userBufLen);
+    if (userBuf != null && b != null) {
+      System.arraycopy(userBuf, userBufOff, b, off, n);
+    }
+    userBufOff += n;
+    userBufLen -= n;
+    nwrite += n;
+
+    if (finish && userBufLen <= 0) {
+      finished = true;
+    }
+
+    return n;
+  }
+
+  @Override
+  public void end() {
+    // nop
+  }
+
+  @Override
+  public void finish() {
+    finish = true;
+  }
+
+  @Override
+  public boolean finished() {
+    return finished;
+  }
+
+  @Override
+  public long getBytesRead() {
+    return nread;
+  }
+
+  @Override
+  public long getBytesWritten() {
+    return nwrite;
+  }
+
+  @Override
+  public boolean needsInput() {
+    return userBufLen <= 0;
+  }
+
+  @Override
+  public void reset() {
+    finish = false;
+    finished = false;
+    nread = 0;
+    nwrite = 0;
+    userBuf = null;
+    userBufOff = 0;
+    userBufLen = 0;
+  }
+
+  @Override
+  public void setDictionary(byte[] b, int off, int len) {
+    // nop
+  }
+
+  @Override
+  public void setInput(byte[] b, int off, int len) {
+    nread += len;
+    userBuf = b;
+    userBufOff = off;
+    userBufLen = len;
+  }
+
+  @Override
+  public void reinit(Configuration conf) {
+    // nop
+  }
+
+}

+ 109 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/FakeDecompressor.java

@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.compress;
+
+import java.io.IOException;
+
+/**
+ * A fake decompressor, just like FakeCompressor
+ * Its input and output is the same.
+ */
+class FakeDecompressor implements Decompressor {
+
+  private boolean finish;
+  private boolean finished;
+  private int nread;
+  private int nwrite;
+
+  private byte[] userBuf;
+  private int userBufOff;
+  private int userBufLen;
+
+  @Override
+  public int decompress(byte[] b, int off, int len) throws IOException {
+    int n = Math.min(len, userBufLen);
+    if (userBuf != null && b != null) {
+      System.arraycopy(userBuf, userBufOff, b, off, n);
+    }
+    userBufOff += n;
+    userBufLen -= n;
+    nwrite += n;
+
+    if (finish && userBufLen <= 0) {
+      finished = true;
+    }
+
+    return n;
+  }
+
+  @Override
+  public void end() {
+    // nop
+  }
+
+  @Override
+  public boolean finished() {
+    return finished;
+  }
+
+  public long getBytesRead() {
+    return nread;
+  }
+
+  public long getBytesWritten() {
+    return nwrite;
+  }
+
+  @Override
+  public boolean needsDictionary() {
+    return false;
+  }
+
+  @Override
+  public boolean needsInput() {
+    return userBufLen <= 0;
+  }
+
+  @Override
+  public void reset() {
+    finish = false;
+    finished = false;
+    nread = 0;
+    nwrite = 0;
+    userBuf = null;
+    userBufOff = 0;
+    userBufLen = 0;
+  }
+
+  @Override
+  public void setDictionary(byte[] b, int off, int len) {
+    // nop
+  }
+
+  @Override
+  public void setInput(byte[] b, int off, int len) {
+    nread += len;
+    userBuf = b;
+    userBufOff = off;
+    userBufLen = len;
+  }
+
+  @Override
+  public int getRemaining() {
+    return 0;
+  }
+
+}

+ 2 - 178
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java

@@ -25,7 +25,6 @@ import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
-import org.apache.hadoop.conf.Configuration;
 import org.junit.Test;
 
 public class TestBlockDecompressorStream {
@@ -67,187 +66,12 @@ public class TestBlockDecompressorStream {
     bytesIn = new ByteArrayInputStream(buf);
     
     // get decompression stream
-    BlockDecompressorStream blockDecompressorStream = 
-      new BlockDecompressorStream(bytesIn, new FakeDecompressor(), 1024);
-    try {
+    try (BlockDecompressorStream blockDecompressorStream =
+      new BlockDecompressorStream(bytesIn, new FakeDecompressor(), 1024)) {
       assertEquals("return value is not -1", 
           -1 , blockDecompressorStream.read());
     } catch (IOException e) {
       fail("unexpected IOException : " + e);
-    } finally {
-      blockDecompressorStream.close();
     }
   }
-}
-
-/**
- * A fake compressor
- * Its input and output is the same.
- */
-class FakeCompressor implements Compressor{
-
-  private boolean finish;
-  private boolean finished;
-  int nread;
-  int nwrite;
-  
-  byte [] userBuf;
-  int userBufOff;
-  int userBufLen;
-  
-  @Override
-  public int compress(byte[] b, int off, int len) throws IOException {
-    int n = Math.min(len, userBufLen);
-    if (userBuf != null && b != null)
-      System.arraycopy(userBuf, userBufOff, b, off, n);
-    userBufOff += n;
-    userBufLen -= n;
-    nwrite += n;
-    
-    if (finish && userBufLen <= 0)
-      finished = true;   
-        
-    return n;
-  }
-
-  @Override
-  public void end() {
-    // nop
-  }
-
-  @Override
-  public void finish() {
-    finish = true;
-  }
-
-  @Override
-  public boolean finished() {
-    return finished;
-  }
-
-  @Override
-  public long getBytesRead() {
-    return nread;
-  }
-
-  @Override
-  public long getBytesWritten() {
-    return nwrite;
-  }
-
-  @Override
-  public boolean needsInput() {
-    return userBufLen <= 0;
-  }
-
-  @Override
-  public void reset() {
-    finish = false;
-    finished = false;
-    nread = 0;
-    nwrite = 0;
-    userBuf = null;
-    userBufOff = 0;
-    userBufLen = 0;
-  }
-
-  @Override
-  public void setDictionary(byte[] b, int off, int len) {
-    // nop
-  }
-
-  @Override
-  public void setInput(byte[] b, int off, int len) {
-    nread += len;
-    userBuf = b;
-    userBufOff = off;
-    userBufLen = len;
-  }
-
-  @Override
-  public void reinit(Configuration conf) {
-    // nop
-  }
-  
-}
-
-/**
- * A fake decompressor, just like FakeCompressor
- * Its input and output is the same.
- */
-class FakeDecompressor implements Decompressor {
-  
-  private boolean finish;
-  private boolean finished;
-  int nread;
-  int nwrite;
-  
-  byte [] userBuf;
-  int userBufOff;
-  int userBufLen;
-
-  @Override
-  public int decompress(byte[] b, int off, int len) throws IOException {
-    int n = Math.min(len, userBufLen);
-    if (userBuf != null && b != null)
-      System.arraycopy(userBuf, userBufOff, b, off, n);
-    userBufOff += n;
-    userBufLen -= n;
-    nwrite += n;
-    
-    if (finish && userBufLen <= 0)
-      finished = true;
-    
-    return n;
-  }
-
-  @Override
-  public void end() {
-    // nop
-  }
-
-  @Override
-  public boolean finished() {
-    return finished;
-  }
-
-  @Override
-  public boolean needsDictionary() {
-    return false;
-  }
-
-  @Override
-  public boolean needsInput() {
-    return userBufLen <= 0;
-  }
-
-  @Override
-  public void reset() {
-    finish = false;
-    finished = false;
-    nread = 0;
-    nwrite = 0;
-    userBuf = null;
-    userBufOff = 0;
-    userBufLen = 0;
-  }
-
-  @Override
-  public void setDictionary(byte[] b, int off, int len) {
-    // nop
-  }
-
-  @Override
-  public void setInput(byte[] b, int off, int len) {
-    nread += len;
-    userBuf = b;
-    userBufOff = off;
-    userBufLen = len;
-  }
-
-  @Override
-  public int getRemaining() {
-    return 0;
-  }
-  
 }

+ 99 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestDecompressorStream.java

@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.compress;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.ByteArrayInputStream;
+import java.io.EOFException;
+import java.io.IOException;
+
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestDecompressorStream {
+  private static final String TEST_STRING =
+      "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
+  private ByteArrayInputStream bytesIn;
+  private Decompressor decompressor;
+  private DecompressorStream decompressorStream;
+
+  @Before
+  public void setUp() throws IOException {
+    bytesIn = new ByteArrayInputStream(TEST_STRING.getBytes());
+    decompressor = new FakeDecompressor();
+    decompressorStream =
+        new DecompressorStream(bytesIn, decompressor, 20, 13);
+  }
+
+  @Test
+  public void testReadOneByte() throws IOException {
+    for (int i = 0; i < TEST_STRING.length(); ++i) {
+      assertThat(decompressorStream.read(), is((int) TEST_STRING.charAt(i)));
+    }
+    try {
+      int ret = decompressorStream.read();
+      fail("Not reachable but got ret " + ret);
+    } catch (EOFException e) {
+      // Expect EOF exception
+    }
+  }
+
+  @Test
+  public void testReadBuffer() throws IOException {
+    // buf.length (32) is smaller than TEST_STRING.length() (62)
+    byte[] buf = new byte[32];
+    int bytesToRead = TEST_STRING.length();
+    int i = 0;
+    while (bytesToRead > 0) {
+      int n = Math.min(bytesToRead, buf.length);
+      int bytesRead = decompressorStream.read(buf, 0, n);
+      assertTrue(bytesRead > 0 && bytesRead <= n);
+      assertThat(new String(buf, 0, bytesRead),
+          is(TEST_STRING.substring(i, i + bytesRead)));
+      bytesToRead = bytesToRead - bytesRead;
+      i = i + bytesRead;
+    }
+    try {
+      int ret = decompressorStream.read(buf, 0, buf.length);
+      fail("Not reachable but got ret " + ret);
+    } catch (EOFException e) {
+      // Expect EOF exception
+    }
+  }
+
+  @Test
+  public void testSkip() throws IOException {
+    assertThat(decompressorStream.skip(12), is(12L));
+    assertThat(decompressorStream.read(), is((int)TEST_STRING.charAt(12)));
+    assertThat(decompressorStream.read(), is((int)TEST_STRING.charAt(13)));
+    assertThat(decompressorStream.read(), is((int)TEST_STRING.charAt(14)));
+    assertThat(decompressorStream.skip(10), is(10L));
+    assertThat(decompressorStream.read(), is((int)TEST_STRING.charAt(25)));
+    try {
+      long ret = decompressorStream.skip(1000);
+      fail("Not reachable but got ret " + ret);
+    } catch (EOFException e) {
+      // Expect EOF exception
+    }
+  }
+}
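The new TestDecompressorStream drives DecompressorStream through the pass-through FakeDecompressor, so read, skip and end-of-stream behaviour can be checked without a real codec. A minimal stand-alone sketch of the same setup, assuming FakeDecompressor stays package-visible in org.apache.hadoop.io.compress as both tests imply (DecompressorStreamSketch is an illustrative name, and the long-standing three-argument constructor is used here):

package org.apache.hadoop.io.compress;

import java.io.ByteArrayInputStream;
import java.io.EOFException;

public class DecompressorStreamSketch {
  public static void main(String[] args) throws Exception {
    byte[] data = "0123456789abcdef".getBytes();
    ByteArrayInputStream bytesIn = new ByteArrayInputStream(data);
    // FakeDecompressor copies its input straight to its output, so the
    // stream hands back exactly the bytes that went in.
    try (DecompressorStream stream =
             new DecompressorStream(bytesIn, new FakeDecompressor(), 20)) {
      for (int i = 0; i < data.length; i++) {
        System.out.print((char) stream.read());
      }
      System.out.println();
      try {
        stream.read();   // past the end of the underlying input
      } catch (EOFException expected) {
        // reaching the end raises EOFException, as the test above asserts
        System.out.println("EOF as expected");
      }
    }
  }
}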

+ 20 - 47
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java

@@ -18,55 +18,32 @@
 
 package org.apache.hadoop.io.retry;
 
-import static org.apache.hadoop.io.retry.RetryPolicies.RETRY_FOREVER;
-import static org.apache.hadoop.io.retry.RetryPolicies.TRY_ONCE_THEN_FAIL;
-import static org.apache.hadoop.io.retry.RetryPolicies.retryByException;
-import static org.apache.hadoop.io.retry.RetryPolicies.retryByRemoteException;
-import static org.apache.hadoop.io.retry.RetryPolicies.retryOtherThanRemoteException;
-import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep;
-import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithProportionalSleep;
-import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumTimeWithFixedSleep;
-import static org.apache.hadoop.io.retry.RetryPolicies.retryForeverWithFixedSleep;
-import static org.apache.hadoop.io.retry.RetryPolicies.exponentialBackoffRetry;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
-import static org.mockito.Matchers.anyInt;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import static org.junit.Assert.*;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
+import org.apache.hadoop.io.retry.RetryPolicies.*;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision;
-import org.apache.hadoop.io.retry.RetryPolicies.RetryUpToMaximumCountWithFixedSleep;
-import org.apache.hadoop.io.retry.RetryPolicies.RetryUpToMaximumTimeWithFixedSleep;
-import org.apache.hadoop.io.retry.RetryPolicies.TryOnceThenFail;
 import org.apache.hadoop.io.retry.UnreliableInterface.FatalException;
 import org.apache.hadoop.io.retry.UnreliableInterface.UnreliableException;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RemoteException;
-
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
+import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.apache.hadoop.io.retry.RetryPolicies.*;
+import static org.junit.Assert.*;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.*;
 
 public class TestRetryProxy {
   
@@ -131,25 +108,21 @@ public class TestRetryProxy {
     final UnreliableInterface unreliable = (UnreliableInterface)
       RetryProxy.create(UnreliableInterface.class, unreliableImpl, RETRY_FOREVER);
     assertTrue(RetryInvocationHandler.isRpcInvocation(unreliable));
-    
+
+    final AtomicInteger count = new AtomicInteger();
     // Embed the proxy in ProtocolTranslator
     ProtocolTranslator xlator = new ProtocolTranslator() {
-      int count = 0;
       @Override
       public Object getUnderlyingProxyObject() {
-        count++;
+        count.getAndIncrement();
         return unreliable;
       }
-      @Override
-      public String toString() {
-        return "" + count;
-      }
     };
     
     // For a proxy wrapped in ProtocolTranslator method should return true
     assertTrue(RetryInvocationHandler.isRpcInvocation(xlator));
     // Ensure underlying proxy was looked at
-    assertEquals(xlator.toString(), "1");
+    assertEquals(1, count.get());
     
     // For non-proxy the method must return false
     assertFalse(RetryInvocationHandler.isRpcInvocation(new Object()));
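The ProtocolTranslator hunk above replaces a mutable int field that was read back through toString() with an AtomicInteger captured from the enclosing test, which can be asserted on directly. A generic sketch of that pattern (CounterSketch and Callback are illustrative names, not part of the patch): a local captured by an anonymous class must be effectively final, so a final AtomicInteger holder is the usual way to let the inner class count invocations.

import java.util.concurrent.atomic.AtomicInteger;

public class CounterSketch {
  interface Callback {
    Object invoke();
  }

  public static void main(String[] args) {
    // The reference is final; only the value inside it changes.
    final AtomicInteger count = new AtomicInteger();
    Callback cb = new Callback() {
      @Override
      public Object invoke() {
        count.getAndIncrement();
        return "payload";
      }
    };
    cb.invoke();
    // Assert on the counter itself instead of encoding it in toString().
    System.out.println(count.get());   // prints 1
  }
}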

+ 35 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java

@@ -28,6 +28,7 @@ import junit.framework.TestCase;
 import org.apache.commons.logging.*;
 import org.apache.commons.logging.impl.*;
 import org.apache.log4j.*;
+import org.junit.Assert;
 
 public class TestLogLevel extends TestCase {
   static final PrintStream out = System.out;
@@ -42,12 +43,13 @@ public class TestLogLevel extends TestCase {
       log.debug("log.debug1");
       log.info("log.info1");
       log.error("log.error1");
-      assertTrue(!Level.ERROR.equals(log.getEffectiveLevel()));
+      Assert.assertNotEquals("Get default Log Level which shouldn't be ERROR.",
+          Level.ERROR, log.getEffectiveLevel());
 
       HttpServer2 server = new HttpServer2.Builder().setName("..")
           .addEndpoint(new URI("http://localhost:0")).setFindPort(true)
           .build();
-      
+
       server.start();
       String authority = NetUtils.getHostPortString(server
           .getConnectorAddress(0));
@@ -67,7 +69,8 @@ public class TestLogLevel extends TestCase {
       log.debug("log.debug2");
       log.info("log.info2");
       log.error("log.error2");
-      assertTrue(Level.ERROR.equals(log.getEffectiveLevel()));
+      assertEquals("Try setting log level: ERROR from servlet.", Level.ERROR,
+          log.getEffectiveLevel());
 
       //command line
       String[] args = {"-setlevel", authority, logName, Level.DEBUG.toString()};
@@ -75,7 +78,35 @@ public class TestLogLevel extends TestCase {
       log.debug("log.debug3");
       log.info("log.info3");
       log.error("log.error3");
-      assertTrue(Level.DEBUG.equals(log.getEffectiveLevel()));
+      assertEquals("Try setting log level: DEBUG via command line", Level.DEBUG,
+          log.getEffectiveLevel());
+
+      // Test mixed upper case and lower case in level string.
+      String[] args2 = {"-setlevel", authority, logName, "Info"};
+      LogLevel.main(args2);
+      log.debug("log.debug4");
+      log.info("log.info4");
+      log.error("log.error4");
+      assertEquals("Try setting log level: Info via command line.", Level.INFO,
+          log.getEffectiveLevel());
+
+      // Test "Error" instead of "ERROR" should work for servlet
+      URL newUrl = new URL("http://" + authority + "/logLevel?log=" + logName
+          + "&level=" + "Error");
+      out.println("*** Connecting to " + newUrl);
+      connection = newUrl.openConnection();
+      connection.connect();
+
+      BufferedReader in2 = new BufferedReader(new InputStreamReader(
+          connection.getInputStream()));
+      for(String line; (line = in2.readLine()) != null; out.println(line));
+      in2.close();
+
+      log.debug("log.debug5");
+      log.info("log.info5");
+      log.error("log.error5");
+      assertEquals("Try setting log level: Error via servlet.", Level.ERROR,
+          log.getEffectiveLevel());
     }
     else {
       out.println(testlog.getClass() + " not tested.");
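The added cases confirm that level strings are handled case-insensitively by both entry points the test already exercised. A hedged sketch of those two invocation styles outside the test (host:port and my.example.Logger are placeholders for a running daemon and one of its loggers):

import org.apache.hadoop.log.LogLevel;

public class LogLevelSketch {
  public static void main(String[] args) throws Exception {
    // Command-line entry point: mixed-case levels such as "Info" are
    // accepted, as the new assertions above verify.
    LogLevel.main(new String[] {
        "-setlevel", "host:port", "my.example.Logger", "Info"});

    // Servlet entry point: the same daemon URL also accepts mixed case,
    // e.g. http://host:port/logLevel?log=my.example.Logger&level=Error
  }
}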

+ 16 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiag.java

@@ -164,6 +164,22 @@ public class TestKDiag extends Assert {
         ARG_PRINCIPAL, "foo@EXAMPLE.COM");
   }
 
+  @Test
+  public void testKerberosName() throws Throwable {
+    kdiagFailure(ARG_KEYLEN, KEYLEN,
+            ARG_VERIFYSHORTNAME,
+            ARG_PRINCIPAL, "foo/foo/foo@BAR.COM");
+  }
+
+  @Test
+  public void testShortName() throws Throwable {
+    kdiag(ARG_KEYLEN, KEYLEN,
+            ARG_KEYTAB, keytab.getAbsolutePath(),
+            ARG_PRINCIPAL,
+            ARG_VERIFYSHORTNAME,
+            ARG_PRINCIPAL, "foo@EXAMPLE.COM");
+  }
+
   @Test
   public void testFileOutput() throws Throwable {
     File f = new File("target/kdiag.txt");

+ 18 - 15
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java

@@ -19,7 +19,11 @@ package org.apache.hadoop.security;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.*;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 import java.io.File;
 import java.io.FileWriter;
@@ -31,7 +35,6 @@ import java.util.List;
 import javax.naming.CommunicationException;
 import javax.naming.NamingException;
 import javax.naming.directory.SearchControls;
-import javax.naming.directory.SearchResult;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -47,18 +50,17 @@ import org.junit.Test;
 public class TestLdapGroupsMapping extends TestLdapGroupsMappingBase {
   @Before
   public void setupMocks() throws NamingException {
-    SearchResult mockUserResult = mock(SearchResult.class);
-    when(mockUserNamingEnum.nextElement()).thenReturn(mockUserResult);
-    when(mockUserResult.getNameInNamespace()).thenReturn("CN=some_user,DC=test,DC=com");
+    when(getUserSearchResult().getNameInNamespace()).
+        thenReturn("CN=some_user,DC=test,DC=com");
   }
   
   @Test
   public void testGetGroups() throws IOException, NamingException {
     // The search functionality of the mock context is reused, so we will
     // return the user NamingEnumeration first, and then the group
-    when(mockContext.search(anyString(), anyString(), any(Object[].class),
+    when(getContext().search(anyString(), anyString(), any(Object[].class),
         any(SearchControls.class)))
-        .thenReturn(mockUserNamingEnum, mockGroupNamingEnum);
+        .thenReturn(getUserNames(), getGroupNames());
     
     doTestGetGroups(Arrays.asList(testGroups), 2);
   }
@@ -67,10 +69,10 @@ public class TestLdapGroupsMapping extends TestLdapGroupsMappingBase {
   public void testGetGroupsWithConnectionClosed() throws IOException, NamingException {
     // The case mocks connection is closed/gc-ed, so the first search call throws CommunicationException,
     // then after reconnected return the user NamingEnumeration first, and then the group
-    when(mockContext.search(anyString(), anyString(), any(Object[].class),
+    when(getContext().search(anyString(), anyString(), any(Object[].class),
         any(SearchControls.class)))
         .thenThrow(new CommunicationException("Connection is closed"))
-        .thenReturn(mockUserNamingEnum, mockGroupNamingEnum);
+        .thenReturn(getUserNames(), getGroupNames());
     
     // Although connection is down but after reconnected it still should retrieve the result groups
     doTestGetGroups(Arrays.asList(testGroups), 1 + 2); // 1 is the first failure call 
@@ -79,29 +81,30 @@ public class TestLdapGroupsMapping extends TestLdapGroupsMappingBase {
   @Test
   public void testGetGroupsWithLdapDown() throws IOException, NamingException {
     // This mocks the case where Ldap server is down, and always throws CommunicationException 
-    when(mockContext.search(anyString(), anyString(), any(Object[].class),
+    when(getContext().search(anyString(), anyString(), any(Object[].class),
         any(SearchControls.class)))
         .thenThrow(new CommunicationException("Connection is closed"));
     
     // Ldap server is down, no groups should be retrieved
     doTestGetGroups(Arrays.asList(new String[] {}), 
-        1 + LdapGroupsMapping.RECONNECT_RETRY_COUNT); // 1 is the first normal call
+        LdapGroupsMapping.RECONNECT_RETRY_COUNT);
   }
   
   private void doTestGetGroups(List<String> expectedGroups, int searchTimes) throws IOException, NamingException {  
     Configuration conf = new Configuration();
     // Set this, so we don't throw an exception
     conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://test");
-    
-    mappingSpy.setConf(conf);
+
+    LdapGroupsMapping groupsMapping = getGroupsMapping();
+    groupsMapping.setConf(conf);
     // Username is arbitrary, since the spy is mocked to respond the same,
     // regardless of input
-    List<String> groups = mappingSpy.getGroups("some_user");
+    List<String> groups = groupsMapping.getGroups("some_user");
     
     Assert.assertEquals(expectedGroups, groups);
     
     // We should have searched for a user, and then two groups
-    verify(mockContext, times(searchTimes)).search(anyString(),
+    verify(getContext(), times(searchTimes)).search(anyString(),
                                          anyString(),
                                          any(Object[].class),
                                          any(SearchControls.class));

+ 57 - 14
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingBase.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.security;
 
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
 import javax.naming.NamingEnumeration;
@@ -30,34 +29,49 @@ import javax.naming.directory.Attributes;
 import javax.naming.directory.BasicAttribute;
 import javax.naming.directory.BasicAttributes;
 import javax.naming.directory.DirContext;
+import javax.naming.directory.SearchControls;
 import javax.naming.directory.SearchResult;
 
 import org.junit.Before;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.mockito.Spy;
 
 public class TestLdapGroupsMappingBase {
-  protected DirContext mockContext;
+  @Mock
+  private DirContext context;
+  @Mock
+  private NamingEnumeration<SearchResult> userNames;
+  @Mock
+  private NamingEnumeration<SearchResult> groupNames;
+  @Mock
+  private SearchResult userSearchResult;
+  @Mock
+  private Attributes attributes;
+  @Spy
+  private LdapGroupsMapping groupsMapping = new LdapGroupsMapping();
 
-  protected LdapGroupsMapping mappingSpy = spy(new LdapGroupsMapping());
-  protected NamingEnumeration mockUserNamingEnum =
-      mock(NamingEnumeration.class);
-  protected NamingEnumeration mockGroupNamingEnum =
-      mock(NamingEnumeration.class);
   protected String[] testGroups = new String[] {"group1", "group2"};
 
   @Before
   public void setupMocksBase() throws NamingException {
-    mockContext = mock(DirContext.class);
-    doReturn(mockContext).when(mappingSpy).getDirContext();
+    MockitoAnnotations.initMocks(this);
+    DirContext ctx = getContext();
+    doReturn(ctx).when(groupsMapping).getDirContext();
 
+    when(ctx.search(Mockito.anyString(), Mockito.anyString(),
+        Mockito.any(Object[].class), Mockito.any(SearchControls.class))).
+        thenReturn(userNames);
     // We only ever call hasMoreElements once for the user NamingEnum, so
     // we can just have one return value
-    when(mockUserNamingEnum.hasMoreElements()).thenReturn(true);
+    when(userNames.hasMoreElements()).thenReturn(true);
 
-    SearchResult mockGroupResult = mock(SearchResult.class);
+    SearchResult groupSearchResult = mock(SearchResult.class);
     // We're going to have to define the loop here. We want two iterations,
     // to get both the groups
-    when(mockGroupNamingEnum.hasMoreElements()).thenReturn(true, true, false);
-    when(mockGroupNamingEnum.nextElement()).thenReturn(mockGroupResult);
+    when(groupNames.hasMoreElements()).thenReturn(true, true, false);
+    when(groupNames.nextElement()).thenReturn(groupSearchResult);
 
     // Define the attribute for the name of the first group
     Attribute group1Attr = new BasicAttribute("cn");
@@ -72,6 +86,35 @@ public class TestLdapGroupsMappingBase {
     group2Attrs.put(group2Attr);
 
     // This search result gets reused, so return group1, then group2
-    when(mockGroupResult.getAttributes()).thenReturn(group1Attrs, group2Attrs);
+    when(groupSearchResult.getAttributes()).
+        thenReturn(group1Attrs, group2Attrs);
+
+    when(getUserNames().nextElement()).
+        thenReturn(getUserSearchResult());
+
+    when(getUserSearchResult().getAttributes()).thenReturn(getAttributes());
+  }
+
+  protected DirContext getContext() {
+    return context;
+  }
+  protected NamingEnumeration<SearchResult> getUserNames() {
+    return userNames;
+  }
+
+  protected NamingEnumeration<SearchResult> getGroupNames() {
+    return groupNames;
+  }
+
+  protected SearchResult getUserSearchResult() {
+    return userSearchResult;
+  }
+
+  protected Attributes getAttributes() {
+    return attributes;
+  }
+
+  protected LdapGroupsMapping getGroupsMapping() {
+    return groupsMapping;
   }
 }
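The base class now builds its fixtures from Mockito annotations instead of explicit mock() and spy() calls, and exposes them through accessors rather than protected fields. A small generic sketch of that pattern (Lookup and AnnotationMockSketch are illustrative, not part of the patch): fields marked @Mock or @Spy are instantiated by MockitoAnnotations.initMocks(this) in a @Before method.

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.when;

import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

public class AnnotationMockSketch {
  interface Lookup {
    String find(String key);
  }

  @Mock
  private Lookup lookup;          // created by initMocks(), no mock() call

  protected Lookup getLookup() {  // subclasses go through the accessor
    return lookup;
  }

  @Before
  public void setUp() {
    MockitoAnnotations.initMocks(this);
    when(lookup.find("some_user")).thenReturn("group1");
  }

  @Test
  public void returnsStubbedValue() {
    assertEquals("group1", getLookup().find("some_user"));
  }
}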

+ 100 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithOneQuery.java

@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import javax.naming.NamingEnumeration;
+import javax.naming.NamingException;
+import javax.naming.directory.Attribute;
+import javax.naming.directory.SearchControls;
+import javax.naming.directory.SearchResult;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test LdapGroupsMapping with one-query lookup enabled.
+ * Mockito is used to simulate the LDAP server response.
+ */
+@SuppressWarnings("unchecked")
+public class TestLdapGroupsMappingWithOneQuery
+    extends TestLdapGroupsMappingBase {
+
+  @Before
+  public void setupMocks() throws NamingException {
+    Attribute groupDN = mock(Attribute.class);
+
+    NamingEnumeration<SearchResult> groupNames = getGroupNames();
+    doReturn(groupNames).when(groupDN).getAll();
+    String groupName1 = "CN=abc,DC=foo,DC=bar,DC=com";
+    String groupName2 = "CN=xyz,DC=foo,DC=bar,DC=com";
+    String groupName3 = "CN=sss,CN=foo,DC=bar,DC=com";
+    doReturn(groupName1).doReturn(groupName2).doReturn(groupName3).
+        when(groupNames).next();
+    when(groupNames.hasMore()).thenReturn(true).thenReturn(true).
+        thenReturn(true).thenReturn(false);
+
+    when(getAttributes().get(eq("memberOf"))).thenReturn(groupDN);
+  }
+
+  @Test
+  public void testGetGroups() throws IOException, NamingException {
+    // given a user whose ldap query returns a user object with three "memberOf"
+    // properties, return an array of strings representing its groups.
+    String[] testGroups = new String[] {"abc", "xyz", "sss"};
+    doTestGetGroups(Arrays.asList(testGroups));
+  }
+
+  private void doTestGetGroups(List<String> expectedGroups)
+      throws IOException, NamingException {
+    Configuration conf = new Configuration();
+    // Set this, so we don't throw an exception
+    conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://test");
+    // enable single-query lookup
+    conf.set(LdapGroupsMapping.MEMBEROF_ATTR_KEY, "memberOf");
+
+    LdapGroupsMapping groupsMapping = getGroupsMapping();
+    groupsMapping.setConf(conf);
+    // Username is arbitrary, since the spy is mocked to respond the same,
+    // regardless of input
+    List<String> groups = groupsMapping.getGroups("some_user");
+
+    Assert.assertEquals(expectedGroups, groups);
+
+    // We should have only made one query because single-query lookup is enabled
+    verify(getContext(), times(1)).search(anyString(),
+        anyString(),
+        any(Object[].class),
+        any(SearchControls.class));
+  }
+}
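Outside the mocked test, the one-query path is enabled purely through configuration. A hedged sketch of that wiring (ldap://ldap.example.com is a placeholder server, and it assumes the LDAP_URL_KEY and MEMBEROF_ATTR_KEY constants are accessible from client code as the test suggests):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.LdapGroupsMapping;

public class OneQuerySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://ldap.example.com");
    // Naming the attribute that carries group DNs switches on the
    // single-query lookup exercised by the test above.
    conf.set(LdapGroupsMapping.MEMBEROF_ATTR_KEY, "memberOf");

    LdapGroupsMapping mapping = new LdapGroupsMapping();
    mapping.setConf(conf);
    // Group names come from the user entry's memberOf values, so only
    // one LDAP search is issued per lookup.
    System.out.println(mapping.getGroups("some_user"));
  }
}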

+ 18 - 23
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithPosixGroup.java

@@ -36,7 +36,6 @@ import javax.naming.NamingException;
 import javax.naming.directory.Attribute;
 import javax.naming.directory.Attributes;
 import javax.naming.directory.SearchControls;
-import javax.naming.directory.SearchResult;
 
 import org.apache.hadoop.conf.Configuration;
 import org.junit.Assert;
@@ -49,31 +48,26 @@ public class TestLdapGroupsMappingWithPosixGroup
 
   @Before
   public void setupMocks() throws NamingException {
-    SearchResult mockUserResult = mock(SearchResult.class);
-    when(mockUserNamingEnum.nextElement()).thenReturn(mockUserResult);
-
-    Attribute mockUidNumberAttr = mock(Attribute.class);
-    Attribute mockGidNumberAttr = mock(Attribute.class);
-    Attribute mockUidAttr = mock(Attribute.class);
-    Attributes mockAttrs = mock(Attributes.class);
-
-    when(mockUidAttr.get()).thenReturn("some_user");
-    when(mockUidNumberAttr.get()).thenReturn("700");
-    when(mockGidNumberAttr.get()).thenReturn("600");
-    when(mockAttrs.get(eq("uid"))).thenReturn(mockUidAttr);
-    when(mockAttrs.get(eq("uidNumber"))).thenReturn(mockUidNumberAttr);
-    when(mockAttrs.get(eq("gidNumber"))).thenReturn(mockGidNumberAttr);
-
-    when(mockUserResult.getAttributes()).thenReturn(mockAttrs);
+    Attribute uidNumberAttr = mock(Attribute.class);
+    Attribute gidNumberAttr = mock(Attribute.class);
+    Attribute uidAttr = mock(Attribute.class);
+    Attributes attributes = getAttributes();
+
+    when(uidAttr.get()).thenReturn("some_user");
+    when(uidNumberAttr.get()).thenReturn("700");
+    when(gidNumberAttr.get()).thenReturn("600");
+    when(attributes.get(eq("uid"))).thenReturn(uidAttr);
+    when(attributes.get(eq("uidNumber"))).thenReturn(uidNumberAttr);
+    when(attributes.get(eq("gidNumber"))).thenReturn(gidNumberAttr);
   }
 
   @Test
   public void testGetGroups() throws IOException, NamingException {
     // The search functionality of the mock context is reused, so we will
     // return the user NamingEnumeration first, and then the group
-    when(mockContext.search(anyString(), contains("posix"),
+    when(getContext().search(anyString(), contains("posix"),
         any(Object[].class), any(SearchControls.class)))
-        .thenReturn(mockUserNamingEnum, mockGroupNamingEnum);
+        .thenReturn(getUserNames(), getGroupNames());
 
     doTestGetGroups(Arrays.asList(testGroups), 2);
   }
@@ -92,19 +86,20 @@ public class TestLdapGroupsMappingWithPosixGroup
     conf.set(LdapGroupsMapping.POSIX_GID_ATTR_KEY, "gidNumber");
     conf.set(LdapGroupsMapping.GROUP_NAME_ATTR_KEY, "cn");
 
-    mappingSpy.setConf(conf);
+    LdapGroupsMapping groupsMapping = getGroupsMapping();
+    groupsMapping.setConf(conf);
     // Username is arbitrary, since the spy is mocked to respond the same,
     // regardless of input
-    List<String> groups = mappingSpy.getGroups("some_user");
+    List<String> groups = groupsMapping.getGroups("some_user");
 
     Assert.assertEquals(expectedGroups, groups);
 
-    mappingSpy.getConf().set(LdapGroupsMapping.POSIX_UID_ATTR_KEY, "uid");
+    groupsMapping.getConf().set(LdapGroupsMapping.POSIX_UID_ATTR_KEY, "uid");
 
     Assert.assertEquals(expectedGroups, groups);
 
     // We should have searched for a user, and then two groups
-    verify(mockContext, times(searchTimes)).search(anyString(),
+    verify(getContext(), times(searchTimes)).search(anyString(),
         anyString(),
         any(Object[].class),
         any(SearchControls.class));

+ 18 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java

@@ -283,10 +283,15 @@ public class TestUserGroupInformation {
     UserGroupInformation.setConfiguration(conf);
     testConstructorSuccess("user1", "user1");
     testConstructorSuccess("user4@OTHER.REALM", "other-user4");
-    // failure test
-    testConstructorFailures("user2@DEFAULT.REALM");
-    testConstructorFailures("user3/cron@DEFAULT.REALM");
-    testConstructorFailures("user5/cron@OTHER.REALM");
+
+    // pass through test, no transformation
+    testConstructorSuccess("user2@DEFAULT.REALM", "user2@DEFAULT.REALM");
+    testConstructorSuccess("user3/cron@DEFAULT.REALM", "user3/cron@DEFAULT.REALM");
+    testConstructorSuccess("user5/cron@OTHER.REALM", "user5/cron@OTHER.REALM");
+
+    // failures
+    testConstructorFailures("user6@example.com@OTHER.REALM");
+    testConstructorFailures("user7@example.com@DEFAULT.REALM");
     testConstructorFailures(null);
     testConstructorFailures("");
   }
@@ -300,10 +305,13 @@ public class TestUserGroupInformation {
 
     testConstructorSuccess("user1", "user1");
     testConstructorSuccess("user2@DEFAULT.REALM", "user2");
-    testConstructorSuccess("user3/cron@DEFAULT.REALM", "user3");    
+    testConstructorSuccess("user3/cron@DEFAULT.REALM", "user3");
+
+    // no rules applied, local name remains the same
+    testConstructorSuccess("user4@OTHER.REALM", "user4@OTHER.REALM");
+    testConstructorSuccess("user5/cron@OTHER.REALM", "user5/cron@OTHER.REALM");
+
     // failure test
-    testConstructorFailures("user4@OTHER.REALM");
-    testConstructorFailures("user5/cron@OTHER.REALM");
     testConstructorFailures(null);
     testConstructorFailures("");
   }
@@ -344,8 +352,9 @@ public class TestUserGroupInformation {
     } catch (IllegalArgumentException e) {
       String expect = (userName == null || userName.isEmpty())
           ? "Null user" : "Illegal principal name "+userName;
-      assertTrue("Did not find "+ expect + " in " + e,
-          e.toString().contains(expect));
+      String expect2 = "Malformed Kerberos name: "+userName;
+      assertTrue("Did not find "+ expect + " or " + expect2 + " in " + e,
+          e.toString().contains(expect) || e.toString().contains(expect2));
     }
   }
 

+ 52 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java

@@ -30,6 +30,7 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
 import org.junit.Test;
@@ -63,6 +64,12 @@ public class TestCredShell {
     assertEquals(outContent.toString(), 0, rc);
     assertTrue(outContent.toString().contains("credential1 has been successfully " +
     		"created."));
+    assertTrue(outContent.toString()
+        .contains(ProviderUtils.NO_PASSWORD_WARN));
+    assertTrue(outContent.toString()
+        .contains(ProviderUtils.NO_PASSWORD_INSTRUCTIONS_DOC));
+    assertTrue(outContent.toString()
+        .contains(ProviderUtils.NO_PASSWORD_CONT));
 
     outContent.reset();
     String[] args2 = {"list", "-provider",
@@ -97,8 +104,8 @@ public class TestCredShell {
     cs.setConf(new Configuration());
     rc = cs.run(args1);
     assertEquals(1, rc);
-    assertTrue(outContent.toString().contains("There are no valid " +
-    		"CredentialProviders configured."));
+    assertTrue(outContent.toString().contains(
+        CredentialShell.NO_VALID_PROVIDERS));
   }
 
   @Test
@@ -132,8 +139,8 @@ public class TestCredShell {
     cs.setConf(config);
     rc = cs.run(args1);
     assertEquals(1, rc);
-    assertTrue(outContent.toString().contains("There are no valid " +
-    		"CredentialProviders configured."));
+    assertTrue(outContent.toString().contains(
+        CredentialShell.NO_VALID_PROVIDERS));
   }
   
   @Test
@@ -225,6 +232,47 @@ public class TestCredShell {
       assertEquals("Expected empty argument on " + cmd + " to return 1", 1,
           shell.init(new String[] { cmd }));
     }
+  }
 
+  @Test
+  public void testStrict() throws Exception {
+    outContent.reset();
+    String[] args1 = {"create", "credential1", "-value", "p@ssw0rd",
+        "-provider", jceksProvider, "-strict"};
+    int rc = 1;
+    CredentialShell cs = new CredentialShell();
+    cs.setConf(new Configuration());
+    rc = cs.run(args1);
+    assertEquals(outContent.toString(), 1, rc);
+    assertFalse(outContent.toString().contains("credential1 has been " +
+        "successfully created."));
+    assertTrue(outContent.toString()
+        .contains(ProviderUtils.NO_PASSWORD_ERROR));
+    assertTrue(outContent.toString()
+        .contains(ProviderUtils.NO_PASSWORD_INSTRUCTIONS_DOC));
+  }
+
+  @Test
+  public void testHelp() throws Exception {
+    outContent.reset();
+    String[] args1 = {"-help"};
+    int rc = 0;
+    CredentialShell cs = new CredentialShell();
+    cs.setConf(new Configuration());
+    rc = cs.run(args1);
+    assertEquals(outContent.toString(), 0, rc);
+    assertTrue(outContent.toString().contains("Usage"));
+  }
+
+  @Test
+  public void testHelpCreate() throws Exception {
+    outContent.reset();
+    String[] args1 = {"create", "-help"};
+    int rc = 0;
+    CredentialShell cs = new CredentialShell();
+    cs.setConf(new Configuration());
+    rc = cs.run(args1);
+    assertEquals(outContent.toString(), 0, rc);
+    assertTrue(outContent.toString().contains("Usage"));
   }
 }

+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java

@@ -213,7 +213,7 @@ public class TestCredentialProviderFactory {
     Path path = ProviderUtils.unnestUri(new URI(ourUrl));
     FileSystem fs = path.getFileSystem(conf);
     FileStatus s = fs.getFileStatus(path);
-    assertTrue(s.getPermission().toString().equals("rwx------"));
+    assertTrue(s.getPermission().toString().equals("rw-------"));
     assertTrue(file + " should exist", file.isFile());
 
     // check permission retention after explicit change
@@ -235,7 +235,8 @@ public class TestCredentialProviderFactory {
     Path path = ProviderUtils.unnestUri(new URI(ourUrl));
     FileSystem fs = path.getFileSystem(conf);
     FileStatus s = fs.getFileStatus(path);
-    assertTrue("Unexpected permissions: " + s.getPermission().toString(), s.getPermission().toString().equals("rwx------"));
+    assertTrue("Unexpected permissions: " + s.getPermission().toString(),
+        s.getPermission().toString().equals("rw-------"));
     assertTrue(file + " should exist", file.isFile());
 
     // check permission retention after explicit change

+ 22 - 18
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/http/TestRestCsrfPreventionFilter.java

@@ -32,6 +32,10 @@ import javax.servlet.http.HttpServletResponse;
 import org.junit.Test;
 import org.mockito.Mockito;
 
+/**
+ * This class tests the behavior of the RestCsrfPreventionFilter.
+ *
+ */
 public class TestRestCsrfPreventionFilter {
 
   private static final String NON_BROWSER = "java";
@@ -43,7 +47,7 @@ public class TestRestCsrfPreventionFilter {
   private static final String X_CUSTOM_HEADER = "X-CUSTOM_HEADER";
 
   @Test
-  public void testNoHeaderDefaultConfig_badRequest()
+  public void testNoHeaderDefaultConfigBadRequest()
       throws ServletException, IOException {
     // Setup the configuration settings of the server
     FilterConfig filterConfig = Mockito.mock(FilterConfig.class);
@@ -56,7 +60,7 @@ public class TestRestCsrfPreventionFilter {
     // CSRF has not been sent
     HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_DEFAULT)).
-    thenReturn(null);
+      thenReturn(null);
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_USER_AGENT)).
       thenReturn(BROWSER_AGENT);
 
@@ -75,7 +79,7 @@ public class TestRestCsrfPreventionFilter {
   }
 
   @Test
-  public void testNoHeaderCustomAgentConfig_badRequest()
+  public void testNoHeaderCustomAgentConfigBadRequest()
       throws ServletException, IOException {
     // Setup the configuration settings of the server
     FilterConfig filterConfig = Mockito.mock(FilterConfig.class);
@@ -91,7 +95,7 @@ public class TestRestCsrfPreventionFilter {
     // CSRF has not been sent
     HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_DEFAULT)).
-    thenReturn(null);
+      thenReturn(null);
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_USER_AGENT)).
       thenReturn("curl");
 
@@ -110,7 +114,7 @@ public class TestRestCsrfPreventionFilter {
   }
 
   @Test
-  public void testNoHeaderDefaultConfigNonBrowser_goodRequest()
+  public void testNoHeaderDefaultConfigNonBrowserGoodRequest()
       throws ServletException, IOException {
     // Setup the configuration settings of the server
     FilterConfig filterConfig = Mockito.mock(FilterConfig.class);
@@ -123,7 +127,7 @@ public class TestRestCsrfPreventionFilter {
     // CSRF has not been sent
     HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_DEFAULT)).
-    thenReturn(null);
+      thenReturn(null);
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_USER_AGENT)).
       thenReturn(NON_BROWSER);
 
@@ -140,7 +144,7 @@ public class TestRestCsrfPreventionFilter {
   }
 
   @Test
-  public void testHeaderPresentDefaultConfig_goodRequest()
+  public void testHeaderPresentDefaultConfigGoodRequest()
       throws ServletException, IOException {
     // Setup the configuration settings of the server
     FilterConfig filterConfig = Mockito.mock(FilterConfig.class);
@@ -168,7 +172,7 @@ public class TestRestCsrfPreventionFilter {
   }
 
   @Test
-  public void testHeaderPresentCustomHeaderConfig_goodRequest()
+  public void testHeaderPresentCustomHeaderConfigGoodRequest()
       throws ServletException, IOException {
     // Setup the configuration settings of the server
     FilterConfig filterConfig = Mockito.mock(FilterConfig.class);
@@ -197,7 +201,7 @@ public class TestRestCsrfPreventionFilter {
   }
 
   @Test
-  public void testMissingHeaderWithCustomHeaderConfig_badRequest()
+  public void testMissingHeaderWithCustomHeaderConfigBadRequest()
       throws ServletException, IOException {
     // Setup the configuration settings of the server
     FilterConfig filterConfig = Mockito.mock(FilterConfig.class);
@@ -209,7 +213,7 @@ public class TestRestCsrfPreventionFilter {
       thenReturn(null);
     HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_USER_AGENT)).
-    thenReturn(BROWSER_AGENT);
+      thenReturn(BROWSER_AGENT);
 
     // CSRF has not been sent
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_DEFAULT)).
@@ -228,7 +232,7 @@ public class TestRestCsrfPreventionFilter {
   }
 
   @Test
-  public void testMissingHeaderNoMethodsToIgnoreConfig_badRequest()
+  public void testMissingHeaderNoMethodsToIgnoreConfigBadRequest()
       throws ServletException, IOException {
     // Setup the configuration settings of the server
     FilterConfig filterConfig = Mockito.mock(FilterConfig.class);
@@ -239,7 +243,7 @@ public class TestRestCsrfPreventionFilter {
       thenReturn("");
     HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_USER_AGENT)).
-    thenReturn(BROWSER_AGENT);
+      thenReturn(BROWSER_AGENT);
 
     // CSRF has not been sent
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_DEFAULT)).
@@ -260,7 +264,7 @@ public class TestRestCsrfPreventionFilter {
   }
 
   @Test
-  public void testMissingHeaderIgnoreGETMethodConfig_goodRequest()
+  public void testMissingHeaderIgnoreGETMethodConfigGoodRequest()
       throws ServletException, IOException {
     // Setup the configuration settings of the server
     FilterConfig filterConfig = Mockito.mock(FilterConfig.class);
@@ -271,7 +275,7 @@ public class TestRestCsrfPreventionFilter {
       thenReturn("GET");
     HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_USER_AGENT)).
-    thenReturn(BROWSER_AGENT);
+      thenReturn(BROWSER_AGENT);
 
     // CSRF has not been sent
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_DEFAULT)).
@@ -292,7 +296,7 @@ public class TestRestCsrfPreventionFilter {
   }
 
   @Test
-  public void testMissingHeaderMultipleIgnoreMethodsConfig_goodRequest()
+  public void testMissingHeaderMultipleIgnoreMethodsConfigGoodRequest()
       throws ServletException, IOException {
     // Setup the configuration settings of the server
     FilterConfig filterConfig = Mockito.mock(FilterConfig.class);
@@ -303,7 +307,7 @@ public class TestRestCsrfPreventionFilter {
       thenReturn("GET,OPTIONS");
     HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_USER_AGENT)).
-    thenReturn(BROWSER_AGENT);
+      thenReturn(BROWSER_AGENT);
 
     // CSRF has not been sent
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_DEFAULT)).
@@ -324,7 +328,7 @@ public class TestRestCsrfPreventionFilter {
   }
 
   @Test
-  public void testMissingHeaderMultipleIgnoreMethodsConfig_badRequest()
+  public void testMissingHeaderMultipleIgnoreMethodsConfigBadRequest()
       throws ServletException, IOException {
     // Setup the configuration settings of the server
     FilterConfig filterConfig = Mockito.mock(FilterConfig.class);
@@ -335,7 +339,7 @@ public class TestRestCsrfPreventionFilter {
       thenReturn("GET,OPTIONS");
     HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_USER_AGENT)).
-    thenReturn(BROWSER_AGENT);
+      thenReturn(BROWSER_AGENT);
 
     // CSRF has not been sent
     Mockito.when(mockReq.getHeader(RestCsrfPreventionFilter.HEADER_DEFAULT)).

+ 151 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/http/TestXFrameOptionsFilter.java

@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.http;
+
+import java.util.Collection;
+import java.util.ArrayList;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+/**
+ * Test the default and customized behaviors of XFrameOptionsFilter.
+ *
+ */
+public class TestXFrameOptionsFilter {
+  private static final String X_FRAME_OPTIONS = "X-Frame-Options";
+
+  @Test
+  public void testDefaultOptionsValue() throws Exception {
+    final Collection<String> headers = new ArrayList<String>();
+    FilterConfig filterConfig = Mockito.mock(FilterConfig.class);
+    Mockito.when(filterConfig.getInitParameter(
+        XFrameOptionsFilter.CUSTOM_HEADER_PARAM)).thenReturn(null);
+
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    FilterChain chain = Mockito.mock(FilterChain.class);
+
+    Mockito.doAnswer(
+        new Answer() {
+        @Override
+        public Object answer(InvocationOnMock invocation) throws Throwable {
+          Object[] args = invocation.getArguments();
+          Assert.assertTrue(
+              "header should be visible inside chain and filters.",
+              ((HttpServletResponse)args[1]).
+              containsHeader(X_FRAME_OPTIONS));
+            return null;
+          }
+        }
+       ).when(chain).doFilter(Mockito.<ServletRequest>anyObject(),
+          Mockito.<ServletResponse>anyObject());
+
+    Mockito.doAnswer(
+        new Answer() {
+        @Override
+        public Object answer(InvocationOnMock invocation) throws Throwable {
+            Object[] args = invocation.getArguments();
+            Assert.assertTrue(
+                "Options value incorrect should be DENY but is: "
+                + args[1], "DENY".equals(args[1]));
+            headers.add((String)args[1]);
+            return null;
+          }
+        }
+       ).when(response).setHeader(Mockito.<String>anyObject(),
+        Mockito.<String>anyObject());
+
+    XFrameOptionsFilter filter = new XFrameOptionsFilter();
+    filter.init(filterConfig);
+
+    filter.doFilter(request, response, chain);
+
+    Assert.assertEquals("X-Frame-Options count not equal to 1.",
+        headers.size(), 1);
+  }
+
+  @Test
+  public void testCustomOptionsValueAndNoOverrides() throws Exception {
+    final Collection<String> headers = new ArrayList<String>();
+    FilterConfig filterConfig = Mockito.mock(FilterConfig.class);
+    Mockito.when(filterConfig.getInitParameter(
+        XFrameOptionsFilter.CUSTOM_HEADER_PARAM)).thenReturn("SAMEORIGIN");
+
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    final HttpServletResponse response =
+        Mockito.mock(HttpServletResponse.class);
+    FilterChain chain = Mockito.mock(FilterChain.class);
+
+    Mockito.doAnswer(
+        new Answer() {
+        @Override
+        public Object answer(InvocationOnMock invocation) throws Throwable {
+          Object[] args = invocation.getArguments();
+          HttpServletResponse resp = (HttpServletResponse) args[1];
+          Assert.assertTrue(
+              "Header should be visible inside chain and filters.",
+              resp.containsHeader(X_FRAME_OPTIONS));
+          // let's try and set another value for the header and make
+          // sure that it doesn't overwrite the configured value
+          Assert.assertTrue(resp instanceof
+              XFrameOptionsFilter.XFrameOptionsResponseWrapper);
+          resp.setHeader(X_FRAME_OPTIONS, "LJM");
+          return null;
+          }
+        }
+       ).when(chain).doFilter(Mockito.<ServletRequest>anyObject(),
+          Mockito.<ServletResponse>anyObject());
+
+    Mockito.doAnswer(
+        new Answer() {
+        @Override
+        public Object answer(InvocationOnMock invocation) throws Throwable {
+            Object[] args = invocation.getArguments();
+            Assert.assertEquals(
+                "Options value incorrect should be SAMEORIGIN but is: "
+                + args[1], "SAMEORIGIN", args[1]);
+            headers.add((String)args[1]);
+            return null;
+          }
+        }
+       ).when(response).setHeader(Mockito.<String>anyObject(),
+        Mockito.<String>anyObject());
+
+    XFrameOptionsFilter filter = new XFrameOptionsFilter();
+    filter.init(filterConfig);
+
+    filter.doFilter(request, response, chain);
+
+    Assert.assertEquals("X-Frame-Options count not equal to 1.",
+        headers.size(), 1);
+
+    Assert.assertEquals("X-Frame-Options count not equal to 1.",
+        headers.toArray()[0], "SAMEORIGIN");
+  }
+}

+ 25 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java

@@ -14,7 +14,7 @@
 package org.apache.hadoop.util;
 
 import com.google.common.base.Preconditions;
-
+import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
@@ -173,4 +173,28 @@ public class JarFinder {
     }
     return null;
   }
+
+  public static File makeClassLoaderTestJar(Class<?> target, File rootDir,
+      String jarName, int buffSize, String... clsNames) throws IOException {
+    File jarFile = new File(rootDir, jarName);
+    JarOutputStream jstream =
+        new JarOutputStream(new FileOutputStream(jarFile));
+    for (String clsName: clsNames) {
+      String name = clsName.replace('.', '/') + ".class";
+      InputStream entryInputStream = target.getResourceAsStream(
+          "/" + name);
+      ZipEntry entry = new ZipEntry(name);
+      jstream.putNextEntry(entry);
+      BufferedInputStream bufInputStream = new BufferedInputStream(
+          entryInputStream, buffSize);
+      int count;
+      byte[] data = new byte[buffSize];
+      while ((count = bufInputStream.read(data, 0, buffSize)) != -1) {
+        jstream.write(data, 0, count);
+      }
+      jstream.closeEntry();
+    }
+    jstream.close();
+    return jarFile;
+  }
 }

+ 2 - 27
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java

@@ -23,11 +23,9 @@ import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
-import java.io.BufferedInputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.jar.JarOutputStream;
 import java.util.regex.Pattern;
 import java.util.zip.ZipEntry;
@@ -156,7 +154,8 @@ public class TestRunJar {
     when(runJar.getSystemClasses()).thenReturn(systemClasses);
 
     // create the test jar
-    File testJar = makeClassLoaderTestJar(mainCls, thirdCls);
+    File testJar = JarFinder.makeClassLoaderTestJar(this.getClass(),
+        TEST_ROOT_DIR, TEST_JAR_2_NAME, BUFF_SIZE, mainCls, thirdCls);
     // form the args
     String[] args = new String[3];
     args[0] = testJar.getAbsolutePath();
@@ -166,28 +165,4 @@ public class TestRunJar {
     runJar.run(args);
     // it should not throw an exception
   }
-
-  private File makeClassLoaderTestJar(String... clsNames) throws IOException {
-    File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_2_NAME);
-    JarOutputStream jstream =
-        new JarOutputStream(new FileOutputStream(jarFile));
-    for (String clsName: clsNames) {
-      String name = clsName.replace('.', '/') + ".class";
-      InputStream entryInputStream = this.getClass().getResourceAsStream(
-          "/" + name);
-      ZipEntry entry = new ZipEntry(name);
-      jstream.putNextEntry(entry);
-      BufferedInputStream bufInputStream = new BufferedInputStream(
-          entryInputStream, BUFF_SIZE);
-      int count;
-      byte[] data = new byte[BUFF_SIZE];
-      while ((count = bufInputStream.read(data, 0, BUFF_SIZE)) != -1) {
-        jstream.write(data, 0, count);
-      }
-      jstream.closeEntry();
-    }
-    jstream.close();
-
-    return jarFile;
-  }
 }

+ 88 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 
 /**
  * A JUnit test to test {@link SysInfoLinux}
@@ -110,11 +111,56 @@ public class TestSysInfoLinux {
     "VmallocTotal: 34359738367 kB\n" +
     "VmallocUsed:      1632 kB\n" +
     "VmallocChunk: 34359736375 kB\n" +
-    "HugePages_Total:     0\n" +
+    "HugePages_Total:     %d\n" +
     "HugePages_Free:      0\n" +
     "HugePages_Rsvd:      0\n" +
     "Hugepagesize:     2048 kB";
 
+  static final String MEMINFO_FORMAT_2 =
+    "MemTotal:       %d kB\n" +
+    "MemFree:        %d kB\n" +
+    "Buffers:          129976 kB\n" +
+    "Cached:         32317676 kB\n" +
+    "SwapCached:            0 kB\n" +
+    "Active:         88938588 kB\n" +
+    "Inactive:       %d kB\n" +
+    "Active(anon):   77502200 kB\n" +
+    "Inactive(anon):  6385336 kB\n" +
+    "Active(file):   11436388 kB\n" +
+    "Inactive(file): %d kB\n" +
+    "Unevictable:           0 kB\n" +
+    "Mlocked:               0 kB\n" +
+    "SwapTotal:      %d kB\n" +
+    "SwapFree:       %d kB\n" +
+    "Dirty:            575864 kB\n" +
+    "Writeback:            16 kB\n" +
+    "AnonPages:      83886180 kB\n" +
+    "Mapped:           108640 kB\n" +
+    "Shmem:              1880 kB\n" +
+    "Slab:            2413448 kB\n" +
+    "SReclaimable:    2194488 kB\n" +
+    "SUnreclaim:       218960 kB\n" +
+    "KernelStack:       31496 kB\n" +
+    "PageTables:       195176 kB\n" +
+    "NFS_Unstable:          0 kB\n" +
+    "Bounce:                0 kB\n" +
+    "WritebackTmp:          0 kB\n" +
+    "CommitLimit:    97683468 kB\n" +
+    "Committed_AS:   94553560 kB\n" +
+    "VmallocTotal:   34359738367 kB\n" +
+    "VmallocUsed:      498580 kB\n" +
+    "VmallocChunk:   34256922296 kB\n" +
+    "HardwareCorrupted: %d kB\n" +
+    "AnonHugePages:         0 kB\n" +
+    "HugePages_Total:       %d\n" +
+    "HugePages_Free:        0\n" +
+    "HugePages_Rsvd:        0\n" +
+    "HugePages_Surp:        0\n" +
+    "Hugepagesize:       2048 kB\n" +
+    "DirectMap4k:        4096 kB\n" +
+    "DirectMap2M:     2027520 kB\n" +
+    "DirectMap1G:    132120576 kB\n";
+
   static final String CPUINFO_FORMAT =
     "processor : %s\n" +
     "vendor_id : AuthenticAMD\n" +
@@ -285,19 +331,57 @@ public class TestSysInfoLinux {
     long inactive = 567732L;
     long swapTotal = 2096472L;
     long swapFree = 1818480L;
+    int nrHugePages = 10;
     File tempFile = new File(FAKE_MEMFILE);
     tempFile.deleteOnExit();
     FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
     fWriter.write(String.format(MEMINFO_FORMAT,
-      memTotal, memFree, inactive, swapTotal, swapFree));
+      memTotal, memFree, inactive, swapTotal, swapFree, nrHugePages));
 
     fWriter.close();
     assertEquals(plugin.getAvailablePhysicalMemorySize(),
                  1024L * (memFree + inactive));
     assertEquals(plugin.getAvailableVirtualMemorySize(),
                  1024L * (memFree + inactive + swapFree));
-    assertEquals(plugin.getPhysicalMemorySize(), 1024L * memTotal);
-    assertEquals(plugin.getVirtualMemorySize(), 1024L * (memTotal + swapTotal));
+    assertEquals(plugin.getPhysicalMemorySize(),
+        1024L * (memTotal - (nrHugePages * 2048)));
+    assertEquals(plugin.getVirtualMemorySize(),
+        1024L * (memTotal - (nrHugePages * 2048) + swapTotal));
+  }
+
+  /**
+   * Test parsing /proc/meminfo with Inactive(file) present
+   * @throws IOException
+   */
+  @Test
+  public void parsingProcMemFile2() throws IOException {
+    long memTotal = 131403836L;
+    long memFree = 11257036L;
+    long inactive = 27396032L;
+    long inactiveFile = 21010696L;
+    long swapTotal = 31981552L;
+    long swapFree = 1818480L;
+    long hardwareCorrupt = 31960904L;
+    int nrHugePages = 10;
+    File tempFile = new File(FAKE_MEMFILE);
+    tempFile.deleteOnExit();
+    FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
+    fWriter.write(String.format(MEMINFO_FORMAT_2,
+      memTotal, memFree, inactive, inactiveFile, swapTotal, swapFree,
+      hardwareCorrupt, nrHugePages));
+
+    fWriter.close();
+    assertEquals(plugin.getAvailablePhysicalMemorySize(),
+                 1024L * (memFree + inactiveFile));
+    assertFalse(plugin.getAvailablePhysicalMemorySize() ==
+                 1024L * (memFree + inactive));
+    assertEquals(plugin.getAvailableVirtualMemorySize(),
+                 1024L * (memFree + inactiveFile + swapFree));
+    assertEquals(plugin.getPhysicalMemorySize(),
+                 1024L * (memTotal - hardwareCorrupt - (nrHugePages * 2048)));
+    assertEquals(plugin.getVirtualMemorySize(),
+                 1024L * (memTotal - hardwareCorrupt -
+                          (nrHugePages * 2048) + swapTotal));
   }
 
   @Test
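The new assertions encode how the plugin derives its figures from /proc/meminfo: huge pages (2048 kB each) and HardwareCorrupted memory are excluded from MemTotal, and Inactive(file) is preferred over Inactive when present. A small arithmetic sketch using the numbers from parsingProcMemFile2 above (MemInfoMathSketch and the variable names are illustrative):

public class MemInfoMathSketch {
  public static void main(String[] args) {
    // /proc/meminfo values are in kB; multiply by 1024 for bytes.
    long memTotalKb = 131403836L;
    long memFreeKb = 11257036L;
    long inactiveFileKb = 21010696L;
    long swapTotalKb = 31981552L;
    long swapFreeKb = 1818480L;
    long hardwareCorruptKb = 31960904L;
    long nrHugePages = 10L;            // each huge page is 2048 kB

    long physical =
        1024L * (memTotalKb - hardwareCorruptKb - nrHugePages * 2048L);
    long availablePhysical = 1024L * (memFreeKb + inactiveFileKb);
    long virtual = physical + 1024L * swapTotalKb;
    long availableVirtual = availablePhysical + 1024L * swapFreeKb;

    System.out.println("physical           = " + physical);
    System.out.println("available physical = " + availablePhysical);
    System.out.println("virtual            = " + virtual);
    System.out.println("available virtual  = " + availableVirtual);
  }
}

These are exactly the quantities the two parsingProcMemFile tests compare against the plugin's getters.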

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash

@@ -16,7 +16,7 @@
 
 setup() {
 
-  TMP=../../../target/test-dir/bats.$$.${RANDOM}
+  TMP="${BATS_TEST_DIRNAME}/../../../target/test-dir/bats.$$.${RANDOM}"
   mkdir -p ${TMP}
   TMP=$(cd -P -- "${TMP}" >/dev/null && pwd -P)
   export TMP
@@ -38,7 +38,7 @@ setup() {
   # shellcheck disable=SC2034
   QATESTMODE=true
 
-  . ../../main/bin/hadoop-functions.sh
+  . "${BATS_TEST_DIRNAME}/../../main/bin/hadoop-functions.sh"
   pushd "${TMP}" >/dev/null
 }
 

+ 32 - 0
hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats

@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_escape_sed (positive 1)" {
+  ret="$(hadoop_sed_escape "\pass&&word\0#\$asdf/g  ><'\"~\`!@#$%^&*()_+-=")"
+  expected="\\\\pass\&\&word\\\0#\$asdf\/g  ><'\"~\`!@#$%^\&*()_+-="
+  echo "actual >${ret}<"
+  echo "expected >${expected}<"
+  [ "${ret}" = "${expected}" ]
+}
+
+@test "hadoop_escape_xml (positive 1)" {
+  ret="$(hadoop_xml_escape "\pass&&word\0#\$asdf/g  ><'\"~\`!@#$%^&*()_+-=")"
+  expected="\\pass&amp;&amp;word\0#\$asdf/g  \&gt;\&lt;\&apos;\&quot;~\`!@#\$%^&amp;*()_+-="
+  echo "actual >${ret}<"
+  echo "expected >${expected}<"
+  [ "${ret}" = "${expected}" ]
+}
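
Judging from the expected strings in these tests, hadoop_sed_escape backslash-escapes the characters that are special on the right-hand side of a sed substitution (backslash, ampersand, slash), while hadoop_xml_escape rewrites XML-significant characters (ampersand, angle brackets, quotes) as entities. Together they replace the ad-hoc hadoop_escape function removed from kms.sh further down, where the two are composed: sed-escape first, then XML-escape, so the result can be spliced into an XML template with sed. A minimal sketch of that pattern, with placeholder variable and file names:

    # Sketch only: splice a user-supplied secret into an XML template the
    # way kms.sh does below. Assumes hadoop-functions.sh has been sourced;
    # SECRET_PASS, template.xml.conf and out.xml are placeholders.
    escaped="$(hadoop_xml_escape "$(hadoop_sed_escape "${SECRET_PASS}")")"
    sed -e 's/"_placeholder_pass_"/'"\"${escaped}\""'/g' \
        template.xml.conf > out.xml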

+ 78 - 0
hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommands.bats

@@ -0,0 +1,78 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+# the loading of shell profiles is tested elsewhere
+# this only tests the specific subcommand parts
+
+subcommandsetup () {
+  export HADOOP_LIBEXEC_DIR="${TMP}/libexec"
+  export HADOOP_CONF_DIR="${TMP}/conf"
+  mkdir -p "${HADOOP_LIBEXEC_DIR}"
+  echo   ". \"${BATS_TEST_DIRNAME}/../../main/bin/hadoop-functions.sh\"" > "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+  cat <<-'TOKEN'   >> "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+
+hadoop_subcommand_sub () {
+  echo "unittest"
+  exit 0
+}
+
+hadoop_subcommand_conftest ()
+{
+  echo conftest
+  exit 0
+}
+
+hadoop_subcommand_envcheck ()
+{
+  echo ${HADOOP_SHELL_EXECNAME}
+  exit 0
+}
+
+hadoop_subcommand_multi ()
+{
+  echo $2
+  exit 0
+}
+TOKEN
+  chmod a+rx "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+}
+
+@test "hadoop_subcommand (addition)" {
+  subcommandsetup
+  run "${BATS_TEST_DIRNAME}/../../main/bin/hadoop" sub
+  echo ">${output}<"
+  [ "${output}" = unittest ]
+}
+
+@test "hadoop_subcommand (substitute)" {
+  subcommandsetup
+  run "${BATS_TEST_DIRNAME}/../../main/bin/hadoop" conftest
+  echo ">${output}<"
+  [ "${output}" = conftest ]
+}
+
+@test "hadoop_subcommand (envcheck)" {
+  subcommandsetup
+  run "${BATS_TEST_DIRNAME}/../../main/bin/hadoop" envcheck
+  [ "${output}" = hadoop ]
+}
+
+@test "hadoop_subcommand (multiparams)" {
+  subcommandsetup
+  run "${BATS_TEST_DIRNAME}/../../main/bin/hadoop" multi 1 2
+  [ "${output}" = 2 ]
+}
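
What these tests exercise is dynamic subcommand dispatch: once a shell function named hadoop_subcommand_<name> has been sourced by the hadoop wrapper, running "hadoop <name>" invokes it. The tests stub such functions into a fake hadoop-config.sh purely for isolation; below is a hedged sketch of what a user-defined subcommand could look like (the "hello" name is invented, and only behaviour asserted by the tests above is relied on):

    # Hedged sketch of a user-defined subcommand for the hadoop CLI.
    hadoop_subcommand_hello ()
    {
      # Per the envcheck test, HADOOP_SHELL_EXECNAME names the wrapper
      # ("hadoop"); per the multiparams test, the positional parameters
      # are the arguments that followed the subcommand on the command line.
      echo "hello from ${HADOOP_SHELL_EXECNAME}: $*"
      exit 0
    }

With that function sourced, "hadoop hello world" would print something like "hello from hadoop: world", mirroring the addition, envcheck, and multiparams cases above.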

+ 2 - 2
hadoop-common-project/hadoop-kms/pom.xml

@@ -22,12 +22,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>3.0.0-alpha1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-kms</artifactId>
-  <version>3.0.0-SNAPSHOT</version>
+  <version>3.0.0-alpha1-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop KMS</name>

+ 4 - 10
hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh

@@ -29,14 +29,6 @@ function hadoop_usage
   hadoop_generate_usage "${MYNAME}" false
 }
 
-function hadoop_escape() {
-      # Escape special chars for the later sed which saves the text as xml attribute
-      local ret
-      ret=$(sed 's/[\/&]/\\&/g' <<< "$1" | sed 's/&/\&amp;/g' | sed 's/"/\\\&quot;/g' \
-          | sed "s/'/\\\\\&apos;/g" | sed 's/</\\\&lt;/g' | sed 's/>/\\\&gt;/g')
-      echo "$ret"
-}
-
 # let's locate libexec...
 if [[ -n "${HADOOP_HOME}" ]]; then
   HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
@@ -104,8 +96,10 @@ fi
 if [[ -f "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml.conf" ]]; then
   if [[ -n "${KMS_SSL_KEYSTORE_PASS+x}" ]] || [[ -n "${KMS_SSL_TRUSTSTORE_PASS}" ]]; then
       export KMS_SSL_KEYSTORE_PASS=${KMS_SSL_KEYSTORE_PASS:-password}
-      KMS_SSL_KEYSTORE_PASS_ESCAPED=$(hadoop_escape "$KMS_SSL_KEYSTORE_PASS")
-      KMS_SSL_TRUSTSTORE_PASS_ESCAPED=$(hadoop_escape "$KMS_SSL_TRUSTSTORE_PASS")
+      KMS_SSL_KEYSTORE_PASS_ESCAPED=$(hadoop_xml_escape \
+        "$(hadoop_sed_escape "$KMS_SSL_KEYSTORE_PASS")")
+      KMS_SSL_TRUSTSTORE_PASS_ESCAPED=$(hadoop_xml_escape \
+        "$(hadoop_sed_escape "$KMS_SSL_TRUSTSTORE_PASS")")
       sed -e 's/"_kms_ssl_keystore_pass_"/'"\"${KMS_SSL_KEYSTORE_PASS_ESCAPED}\""'/g' \
           -e 's/"_kms_ssl_truststore_pass_"/'"\"${KMS_SSL_TRUSTSTORE_PASS_ESCAPED}\""'/g' \
         "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml.conf" \

+ 2 - 2
hadoop-common-project/hadoop-minikdc/pom.xml

@@ -18,13 +18,13 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>3.0.0-alpha1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-minikdc</artifactId>
-  <version>3.0.0-SNAPSHOT</version>
+  <version>3.0.0-alpha1-SNAPSHOT</version>
   <description>Apache Hadoop MiniKDC</description>
   <name>Apache Hadoop MiniKDC</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-common-project/hadoop-nfs/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>3.0.0-alpha1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-nfs</artifactId>
-  <version>3.0.0-SNAPSHOT</version>
+  <version>3.0.0-alpha1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop NFS</name>

+ 2 - 2
hadoop-common-project/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>3.0.0-alpha1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-common-project</artifactId>
-  <version>3.0.0-SNAPSHOT</version>
+  <version>3.0.0-alpha1-SNAPSHOT</version>
   <description>Apache Hadoop Common Project</description>
   <name>Apache Hadoop Common Project</name>
   <packaging>pom</packaging>

+ 2 - 2
hadoop-dist/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
+    <version>3.0.0-alpha1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-dist</artifactId>
-  <version>3.0.0-SNAPSHOT</version>
+  <version>3.0.0-alpha1-SNAPSHOT</version>
   <description>Apache Hadoop Distribution</description>
   <name>Apache Hadoop Distribution</name>
   <packaging>jar</packaging>

Some files were not shown because too many files changed in this diff