
Merge branch 'trunk' into HADOOP-13345

After HADOOP-14040, we use the shaded AWS SDK uber-JAR, so we no longer have to
declare the DynamoDB dependency explicitly. For tests, however, we still need
the DynamoDBLocal dependency from its Maven repository.
Author: Mingliang Liu
Commit: 4f7f2baf68
100 changed files with 2744 additions and 1550 deletions
  1. .gitignore (+4, -8)
  2. hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml (+24, -1)
  3. hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml (+1, -3)
  4. hadoop-client-modules/hadoop-client-api/pom.xml (+10, -2)
  5. hadoop-client-modules/hadoop-client-minicluster/pom.xml (+20, -2)
  6. hadoop-client-modules/hadoop-client-runtime/pom.xml (+14, -2)
  7. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java (+12, -0)
  8. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/CachingKeyProvider.java (+11, -3)
  9. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java (+12, -0)
  10. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderExtension.java (+5, -0)
  11. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java (+66, -1)
  12. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java (+12, -1)
  13. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java (+1, -0)
  14. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java (+19, -5)
  15. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java (+102, -15)
  16. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java (+5, -2)
  17. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java (+11, -0)
  18. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java (+3, -0)
  19. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java (+19, -2)
  20. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsCreateModes.java (+1, -0)
  21. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java (+19, -3)
  22. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java (+2, -2)
  23. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java (+34, -3)
  24. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java (+100, -19)
  25. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java (+5, -3)
  26. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java (+4, -2)
  27. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java (+51, -114)
  28. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java (+108, -98)
  29. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNode.java (+67, -0)
  30. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java (+304, -0)
  31. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java (+7, -319)
  32. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java (+9, -34)
  33. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java (+24, -19)
  34. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java (+101, -28)
  35. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java (+23, -10)
  36. hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md (+3, -0)
  37. hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md (+1, -0)
  38. hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md (+1, -1)
  39. hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md (+6, -0)
  40. hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md (+1, -1)
  41. hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md (+1, -1)
  42. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java (+9, -0)
  43. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java (+19, -0)
  44. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java (+31, -0)
  45. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java (+38, -0)
  46. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java (+14, -1)
  47. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java (+56, -1)
  48. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java (+119, -0)
  49. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java (+1, -0)
  50. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java (+14, -2)
  51. hadoop-common-project/hadoop-common/src/test/resources/testConf.xml (+1, -1)
  52. hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/EagerKeyGeneratorKeyProviderCryptoExtension.java (+6, -0)
  53. hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java (+32, -1)
  54. hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java (+2, -0)
  55. hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java (+0, -10)
  56. hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java (+3, -0)
  57. hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java (+11, -0)
  58. hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml (+14, -0)
  59. hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm (+49, -3)
  60. hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java (+72, -17)
  61. hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java (+2, -0)
  62. hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java (+15, -2)
  63. hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (+1, -1)
  64. hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (+19, -29)
  65. hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java (+70, -35)
  66. hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (+1, -1)
  67. hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java (+4, -4)
  68. hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (+1, -1)
  69. hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (+3, -3)
  70. hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java (+2, -0)
  71. hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java (+51, -0)
  72. hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java (+86, -3)
  73. hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto (+33, -0)
  74. hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml (+5, -98)
  75. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh (+15, -32)
  76. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java (+1, -1)
  77. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java (+170, -0)
  78. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/MDCFilter.java (+3, -1)
  79. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/httpfs-config.sh (+0, -76)
  80. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh (+67, -0)
  81. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml (+72, -0)
  82. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/webapps/static/index.html (+17, -2)
  83. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/webapps/webhdfs/WEB-INF/web.xml (+98, -0)
  84. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh (+38, -88)
  85. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/WEB-INF/web.xml (+0, -16)
  86. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/logging.properties (+0, -67)
  87. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml (+0, -151)
  88. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ssl-server.xml.conf (+0, -136)
  89. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm (+105, -31)
  90. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md (+1, -1)
  91. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServer.java (+106, -0)
  92. hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java (+4, -2)
  93. hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs (+2, -2)
  94. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (+4, -16)
  95. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java (+7, -0)
  96. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java (+12, -6)
  97. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (+1, -1)
  98. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java (+2, -1)
  99. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java (+5, -1)
  100. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (+7, -2)

+ 4 - 8
.gitignore

@@ -17,6 +17,10 @@ target
 build
 dependency-reduced-pom.xml
 
+# Filesystem contract test options and credentials
+auth-keys.xml
+azure-auth-keys.xml
+
 # External tool builders
 */.externalToolBuilders
 */maven-eclipse.xml
@@ -24,8 +28,6 @@ dependency-reduced-pom.xml
 hadoop-common-project/hadoop-kms/downloads/
 hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
-hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
-hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/dist
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tmp
@@ -41,10 +43,4 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/testem.log
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/dist
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp
 yarnregistry.pdf
-hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
-hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
-hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml
-hadoop-tools/hadoop-openstack/src/test/resources/auth-keys.xml
 patchprocess/
-hadoop-tools/hadoop-aliyun/src/test/resources/auth-keys.xml
-hadoop-tools/hadoop-aliyun/src/test/resources/contract-test-options.xml

+ 24 - 1
hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml

@@ -21,6 +21,14 @@
   </formats>
   <includeBaseDirectory>false</includeBaseDirectory>
   <fileSets>
+    <!-- Jar file -->
+    <fileSet>
+      <directory>target</directory>
+      <outputDirectory>/share/hadoop/hdfs</outputDirectory>
+      <includes>
+        <include>${project.artifactId}-${project.version}.jar</include>
+      </includes>
+    </fileSet>
     <!-- Configuration files -->
     <fileSet>
       <directory>${basedir}/src/main/conf</directory>
@@ -41,7 +49,7 @@
       <directory>${basedir}/src/main/libexec</directory>
       <outputDirectory>/libexec</outputDirectory>
       <includes>
-        <include>*</include>
+        <include>**/*</include>
       </includes>
       <fileMode>0755</fileMode>
     </fileSet>
@@ -51,4 +59,19 @@
       <outputDirectory>/share/doc/hadoop/httpfs</outputDirectory>
     </fileSet>
   </fileSets>
+  <dependencySets>
+    <dependencySet>
+      <useProjectArtifact>false</useProjectArtifact>
+      <outputDirectory>/share/hadoop/hdfs/lib</outputDirectory>
+      <!-- Exclude hadoop artifacts. They will be found via HADOOP* env -->
+      <excludes>
+        <exclude>org.apache.hadoop:hadoop-common</exclude>
+        <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
+        <!-- use slf4j from common to avoid multiple binding warnings -->
+        <exclude>org.slf4j:slf4j-api</exclude>
+        <exclude>org.slf4j:slf4j-log4j12</exclude>
+        <exclude>org.hsqldb:hsqldb</exclude>
+      </excludes>
+    </dependencySet>
+  </dependencySets>
 </assembly>

+ 1 - 3
hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml

@@ -123,9 +123,7 @@
 
         <!-- Checks for Size Violations.                    -->
         <!-- See http://checkstyle.sf.net/config_sizes.html -->
-        <module name="LineLength">
-          <property name="ignorePattern" value="^(package|import) .*"/>
-        </module>
+        <module name="LineLength"/>
         <module name="MethodLength"/>
         <module name="ParameterNumber"/>
 

+ 10 - 2
hadoop-client-modules/hadoop-client-api/pom.xml

@@ -204,8 +204,16 @@
                     <!-- Needed until MSHADE-182 -->
                     <transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
                     <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
-                    <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheNoticeResourceTransformer">
-                        <addHeader>false</addHeader>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
+                      <resource>NOTICE.txt</resource>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/LICENSE.txt</resource>
+                      <file>${basedir}/../../LICENSE.txt</file>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/NOTICE.txt</resource>
+                      <file>${basedir}/../../NOTICE.txt</file>
                     </transformer>
                   </transformers>
                 </configuration>

+ 20 - 2
hadoop-client-modules/hadoop-client-minicluster/pom.xml

@@ -731,8 +731,26 @@
                     <!-- Needed until MSHADE-182 -->
                     <transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
                     <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
-                    <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheNoticeResourceTransformer">
-                      <addHeader>false</addHeader>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
+                      <resources>
+                        <resource>LICENSE</resource>
+                        <resource>LICENSE.txt</resource>
+                        <resource>NOTICE</resource>
+                        <resource>NOTICE.txt</resource>
+                        <resource>Grizzly_THIRDPARTYLICENSEREADME.txt</resource>
+                        <resource>LICENSE.dom-documentation.txt</resource>
+                        <resource>LICENSE.dom-software.txt</resource>
+                        <resource>LICENSE.dom-documentation.txt</resource>
+                        <resource>LICENSE.sax.txt</resource>
+                      </resources>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/LICENSE.txt</resource>
+                      <file>${basedir}/../../LICENSE.txt</file>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/NOTICE.txt</resource>
+                      <file>${basedir}/../../NOTICE.txt</file>
                     </transformer>
                   </transformers>
                 </configuration>

+ 14 - 2
hadoop-client-modules/hadoop-client-runtime/pom.xml

@@ -292,8 +292,20 @@
                     <!-- Needed until MSHADE-182 -->
                     <transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
                     <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
-                    <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheNoticeResourceTransformer">
-                      <addHeader>false</addHeader>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
+                      <resources>
+                        <resource>NOTICE.txt</resource>
+                        <resource>NOTICE</resource>
+                        <resource>LICENSE</resource>
+                      </resources>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/LICENSE.txt</resource>
+                      <file>${basedir}/../../LICENSE.txt</file>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/NOTICE.txt</resource>
+                      <file>${basedir}/../../NOTICE.txt</file>
                     </transformer>
                     <transformer implementation="org.apache.maven.plugins.shade.resource.XmlAppendingTransformer">
                       <resource>META-INF/jboss-beans.xml</resource>

+ 12 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -1887,6 +1887,18 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       return result.toString();
     }
 
+    /**
+     * Get range start for the first integer range.
+     * @return range start.
+     */
+    public int getRangeStart() {
+      if (ranges == null || ranges.isEmpty()) {
+        return -1;
+      }
+      Range r = ranges.get(0);
+      return r.start;
+    }
+
     @Override
     public Iterator<Integer> iterator() {
       return new RangeNumberIterator(ranges);

+ 11 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/CachingKeyProvider.java

@@ -141,8 +141,7 @@ public class CachingKeyProvider extends
   public KeyVersion rollNewVersion(String name, byte[] material)
       throws IOException {
     KeyVersion key = getKeyProvider().rollNewVersion(name, material);
-    getExtension().currentKeyCache.invalidate(name);
-    getExtension().keyMetadataCache.invalidate(name);
+    invalidateCache(name);
     return key;
   }
 
@@ -150,9 +149,18 @@ public class CachingKeyProvider extends
   public KeyVersion rollNewVersion(String name)
       throws NoSuchAlgorithmException, IOException {
     KeyVersion key = getKeyProvider().rollNewVersion(name);
+    invalidateCache(name);
+    return key;
+  }
+
+  @Override
+  public void invalidateCache(String name) throws IOException {
+    getKeyProvider().invalidateCache(name);
     getExtension().currentKeyCache.invalidate(name);
     getExtension().keyMetadataCache.invalidate(name);
-    return key;
+    // invalidating all key versions as we don't know
+    // which ones belonged to the deleted key
+    getExtension().keyVersionCache.invalidateAll();
   }
 
   @Override

+ 12 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java

@@ -593,6 +593,18 @@ public abstract class KeyProvider {
     return rollNewVersion(name, material);
   }
 
+  /**
+   * Can be used by implementing classes to invalidate the caches. This could be
+   * used after rollNewVersion to provide a strong guarantee to return the new
+   * version of the given key.
+   *
+   * @param name the basename of the key
+   * @throws IOException
+   */
+  public void invalidateCache(String name) throws IOException {
+    // NOP
+  }
+
   /**
    * Ensures that any changes to the keys are written to persistent store.
    * @throws IOException

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderExtension.java

@@ -117,6 +117,11 @@ public abstract class KeyProviderExtension
     return keyProvider.rollNewVersion(name, material);
   }
 
+  @Override
+  public void invalidateCache(String name) throws IOException {
+    keyProvider.invalidateCache(name);
+  }
+
   @Override
   public void flush() throws IOException {
     keyProvider.flush();

+ 66 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java

@@ -46,7 +46,8 @@ public class KeyShell extends CommandShell {
       "   [" + CreateCommand.USAGE + "]\n" +
       "   [" + RollCommand.USAGE + "]\n" +
       "   [" + DeleteCommand.USAGE + "]\n" +
-      "   [" + ListCommand.USAGE + "]\n";
+      "   [" + ListCommand.USAGE + "]\n" +
+      "   [" + InvalidateCacheCommand.USAGE + "]\n";
   private static final String LIST_METADATA = "keyShell.list.metadata";
   @VisibleForTesting
   public static final String NO_VALID_PROVIDERS =
@@ -70,6 +71,7 @@ public class KeyShell extends CommandShell {
    * % hadoop key roll keyName [-provider providerPath]
    * % hadoop key list [-provider providerPath]
    * % hadoop key delete keyName [-provider providerPath] [-i]
+   * % hadoop key invalidateCache keyName [-provider providerPath]
    * </pre>
    * @param args Command line arguments.
    * @return 0 on success, 1 on failure.
@@ -111,6 +113,15 @@ public class KeyShell extends CommandShell {
         }
       } else if ("list".equals(args[i])) {
         setSubCommand(new ListCommand());
+      } else if ("invalidateCache".equals(args[i])) {
+        String keyName = "-help";
+        if (moreTokens) {
+          keyName = args[++i];
+        }
+        setSubCommand(new InvalidateCacheCommand(keyName));
+        if ("-help".equals(keyName)) {
+          return 1;
+        }
       } else if ("-size".equals(args[i]) && moreTokens) {
         options.setBitLength(Integer.parseInt(args[++i]));
       } else if ("-cipher".equals(args[i]) && moreTokens) {
@@ -168,6 +179,9 @@ public class KeyShell extends CommandShell {
     sbuf.append(DeleteCommand.USAGE + ":\n\n" + DeleteCommand.DESC + "\n");
     sbuf.append(banner + "\n");
     sbuf.append(ListCommand.USAGE + ":\n\n" + ListCommand.DESC + "\n");
+    sbuf.append(banner + "\n");
+    sbuf.append(InvalidateCacheCommand.USAGE + ":\n\n"
+        + InvalidateCacheCommand.DESC + "\n");
     return sbuf.toString();
   }
 
@@ -466,6 +480,57 @@ public class KeyShell extends CommandShell {
     }
   }
 
+  private class InvalidateCacheCommand extends Command {
+    public static final String USAGE =
+        "invalidateCache <keyname> [-provider <provider>] [-help]";
+    public static final String DESC =
+        "The invalidateCache subcommand invalidates the cached key versions\n"
+            + "of the specified key, on the provider indicated using the"
+            + " -provider argument.\n";
+
+    private String keyName = null;
+
+    InvalidateCacheCommand(String keyName) {
+      this.keyName = keyName;
+    }
+
+    public boolean validate() {
+      boolean rc = true;
+      provider = getKeyProvider();
+      if (provider == null) {
+        getOut().println("Invalid provider.");
+        rc = false;
+      }
+      if (keyName == null) {
+        getOut().println("Please provide a <keyname>.\n" +
+            "See the usage description by using -help.");
+        rc = false;
+      }
+      return rc;
+    }
+
+    public void execute() throws NoSuchAlgorithmException, IOException {
+      try {
+        warnIfTransientProvider();
+        getOut().println("Invalidating cache on KeyProvider: "
+            + provider + "\n  for key name: " + keyName);
+        provider.invalidateCache(keyName);
+        getOut().println("Cached keyversions of " + keyName
+            + " has been successfully invalidated.");
+        printProviderWritten();
+      } catch (IOException e) {
+        getOut().println("Cannot invalidate cache for key: " + keyName +
+            " within KeyProvider: " + provider + ". " + e.toString());
+        throw e;
+      }
+    }
+
+    @Override
+    public String getUsage() {
+      return USAGE + ":\n\n" + DESC;
+    }
+  }
+
   /**
    * main() entry point for the KeyShell.  While strictly speaking the
    * return is void, it will System.exit() with a return code: 0 is for

+ 12 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java

@@ -757,6 +757,17 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     }
   }
 
+  @Override
+  public void invalidateCache(String name) throws IOException {
+    checkNotEmpty(name, "name");
+    final URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name,
+        KMSRESTConstants.INVALIDATECACHE_RESOURCE, null);
+    final HttpURLConnection conn = createConnection(url, HTTP_POST);
+    // invalidate the server cache first, then drain local cache.
+    call(conn, null, HttpURLConnection.HTTP_OK, null);
+    drain(name);
+  }
+
   private KeyVersion rollNewVersionInternal(String name, byte[] material)
       throws NoSuchAlgorithmException, IOException {
     checkNotEmpty(name, "name");
@@ -771,7 +782,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     Map response = call(conn, jsonMaterial,
         HttpURLConnection.HTTP_OK, Map.class);
     KeyVersion keyVersion = parseJSONKeyVersion(response);
-    encKeyVersionQueue.drain(name);
+    invalidateCache(name);
     return keyVersion;
   }
 

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java

@@ -36,6 +36,7 @@ public class KMSRESTConstants {
   public static final String VERSIONS_SUB_RESOURCE = "_versions";
   public static final String EEK_SUB_RESOURCE = "_eek";
   public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion";
+  public static final String INVALIDATECACHE_RESOURCE = "_invalidatecache";
 
   public static final String KEY = "key";
   public static final String EEK_OP = "eek_op";

+ 19 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java

@@ -178,6 +178,14 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
     }
   }
 
+  // This request is sent to all providers in the load-balancing group
+  @Override
+  public void invalidateCache(String keyName) throws IOException {
+    for (KMSClientProvider provider : providers) {
+      provider.invalidateCache(keyName);
+    }
+  }
+
   @Override
   public EncryptedKeyVersion
       generateEncryptedKey(final String encryptionKeyName)
@@ -218,14 +226,14 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
     }
   }
 
-  public EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion edek)
+  public EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
       throws IOException, GeneralSecurityException {
     try {
       return doOp(new ProviderCallable<EncryptedKeyVersion>() {
         @Override
         public EncryptedKeyVersion call(KMSClientProvider provider)
             throws IOException, GeneralSecurityException {
-          return provider.reencryptEncryptedKey(edek);
+          return provider.reencryptEncryptedKey(ekv);
         }
       }, nextIdx());
     } catch (WrapperException we) {
@@ -325,6 +333,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       throw new IOException(e.getCause());
     }
   }
+
   @Override
   public void deleteKey(final String name) throws IOException {
     doOp(new ProviderCallable<Void>() {
@@ -335,28 +344,33 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       }
     }, nextIdx());
   }
+
   @Override
   public KeyVersion rollNewVersion(final String name, final byte[] material)
       throws IOException {
-    return doOp(new ProviderCallable<KeyVersion>() {
+    final KeyVersion newVersion = doOp(new ProviderCallable<KeyVersion>() {
       @Override
       public KeyVersion call(KMSClientProvider provider) throws IOException {
         return provider.rollNewVersion(name, material);
       }
     }, nextIdx());
+    invalidateCache(name);
+    return newVersion;
   }
 
   @Override
   public KeyVersion rollNewVersion(final String name)
       throws NoSuchAlgorithmException, IOException {
     try {
-      return doOp(new ProviderCallable<KeyVersion>() {
+      final KeyVersion newVersion = doOp(new ProviderCallable<KeyVersion>() {
         @Override
         public KeyVersion call(KMSClientProvider provider) throws IOException,
-        NoSuchAlgorithmException {
+            NoSuchAlgorithmException {
           return provider.rollNewVersion(name);
         }
       }, nextIdx());
+      invalidateCache(name);
+      return newVersion;
     } catch (WrapperException e) {
       if (e.getCause() instanceof GeneralSecurityException) {
         throw (NoSuchAlgorithmException) e.getCause();

+ 102 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java

@@ -18,8 +18,9 @@
 package org.apache.hadoop.crypto.key.kms;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashSet;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -28,6 +29,9 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import com.google.common.base.Preconditions;
 import com.google.common.cache.CacheBuilder;
@@ -67,8 +71,17 @@ public class ValueQueue <E> {
 
   private static final String REFILL_THREAD =
       ValueQueue.class.getName() + "_thread";
+  private static final int LOCK_ARRAY_SIZE = 16;
+  // Using a mask assuming array size is the power of 2, of MAX_VALUE.
+  private static final int MASK = LOCK_ARRAY_SIZE == Integer.MAX_VALUE ?
+      LOCK_ARRAY_SIZE :
+      LOCK_ARRAY_SIZE - 1;
 
   private final LoadingCache<String, LinkedBlockingQueue<E>> keyQueues;
+  // Striped rwlocks based on key name to synchronize the queue from
+  // the sync'ed rw-thread and the background async refill thread.
+  private final List<ReadWriteLock> lockArray =
+      new ArrayList<>(LOCK_ARRAY_SIZE);
   private final ThreadPoolExecutor executor;
   private final UniqueKeyBlockingQueue queue = new UniqueKeyBlockingQueue();
   private final QueueRefiller<E> refiller;
@@ -84,9 +97,47 @@ public class ValueQueue <E> {
    */
   private abstract static class NamedRunnable implements Runnable {
     final String name;
+    private AtomicBoolean canceled = new AtomicBoolean(false);
     private NamedRunnable(String keyName) {
       this.name = keyName;
     }
+
+    public void cancel() {
+      canceled.set(true);
+    }
+
+    public boolean isCanceled() {
+      return canceled.get();
+    }
+  }
+
+  private void readLock(String keyName) {
+    getLock(keyName).readLock().lock();
+  }
+
+  private void readUnlock(String keyName) {
+    getLock(keyName).readLock().unlock();
+  }
+
+  private void writeUnlock(String keyName) {
+    getLock(keyName).writeLock().unlock();
+  }
+
+  private void writeLock(String keyName) {
+    getLock(keyName).writeLock().lock();
+  }
+
+  /**
+   * Get the striped lock given a key name.
+   *
+   * @param keyName The key name.
+   */
+  private ReadWriteLock getLock(String keyName) {
+    return lockArray.get(indexFor(keyName));
+  }
+
+  private static int indexFor(String keyName) {
+    return keyName.hashCode() & MASK;
   }
 
   /**
@@ -103,11 +154,12 @@ public class ValueQueue <E> {
       LinkedBlockingQueue<Runnable> {
 
     private static final long serialVersionUID = -2152747693695890371L;
-    private HashSet<String> keysInProgress = new HashSet<String>();
+    private HashMap<String, Runnable> keysInProgress = new HashMap<>();
 
     @Override
     public synchronized void put(Runnable e) throws InterruptedException {
-      if (keysInProgress.add(((NamedRunnable)e).name)) {
+      if (!keysInProgress.containsKey(((NamedRunnable)e).name)) {
+        keysInProgress.put(((NamedRunnable)e).name, e);
         super.put(e);
       }
     }
@@ -131,6 +183,14 @@ public class ValueQueue <E> {
       return k;
     }
 
+    public Runnable deleteByName(String name) {
+      NamedRunnable e = (NamedRunnable) keysInProgress.remove(name);
+      if (e != null) {
+        e.cancel();
+        super.remove(e);
+      }
+      return e;
+    }
   }
 
   /**
@@ -172,6 +232,9 @@ public class ValueQueue <E> {
     this.policy = policy;
     this.numValues = numValues;
     this.lowWatermark = lowWatermark;
+    for (int i = 0; i < LOCK_ARRAY_SIZE; ++i) {
+      lockArray.add(i, new ReentrantReadWriteLock());
+    }
     keyQueues = CacheBuilder.newBuilder()
             .expireAfterAccess(expiry, TimeUnit.MILLISECONDS)
             .build(new CacheLoader<String, LinkedBlockingQueue<E>>() {
@@ -233,9 +296,18 @@ public class ValueQueue <E> {
    *
    * @param keyName the key to drain the Queue for
    */
-  public void drain(String keyName ) {
+  public void drain(String keyName) {
     try {
-      keyQueues.get(keyName).clear();
+      Runnable e;
+      while ((e = queue.deleteByName(keyName)) != null) {
+        executor.remove(e);
+      }
+      writeLock(keyName);
+      try {
+        keyQueues.get(keyName).clear();
+      } finally {
+        writeUnlock(keyName);
+      }
     } catch (ExecutionException ex) {
       //NOP
     }
@@ -247,14 +319,19 @@ public class ValueQueue <E> {
    * @return int queue size
    */
   public int getSize(String keyName) {
-    // We can't do keyQueues.get(keyName).size() here,
-    // since that will have the side effect of populating the cache.
-    Map<String, LinkedBlockingQueue<E>> map =
-        keyQueues.getAllPresent(Arrays.asList(keyName));
-    if (map.get(keyName) == null) {
-      return 0;
+    readLock(keyName);
+    try {
+      // We can't do keyQueues.get(keyName).size() here,
+      // since that will have the side effect of populating the cache.
+      Map<String, LinkedBlockingQueue<E>> map =
+          keyQueues.getAllPresent(Arrays.asList(keyName));
+      if (map.get(keyName) == null) {
+        return 0;
+      }
+      return map.get(keyName).size();
+    } finally {
+      readUnlock(keyName);
     }
-    return map.get(keyName).size();
   }
 
   /**
@@ -276,7 +353,9 @@ public class ValueQueue <E> {
     LinkedList<E> ekvs = new LinkedList<E>();
     try {
       for (int i = 0; i < num; i++) {
+        readLock(keyName);
         E val = keyQueue.poll();
+        readUnlock(keyName);
         // If queue is empty now, Based on the provided SyncGenerationPolicy,
         // figure out how many new values need to be generated synchronously
         if (val == null) {
@@ -336,9 +415,17 @@ public class ValueQueue <E> {
             int threshold = (int) (lowWatermark * (float) cacheSize);
             // Need to ensure that only one refill task per key is executed
             try {
-              if (keyQueue.size() < threshold) {
-                refiller.fillQueueForKey(name, keyQueue,
-                    cacheSize - keyQueue.size());
+              writeLock(keyName);
+              try {
+                if (keyQueue.size() < threshold && !isCanceled()) {
+                  refiller.fillQueueForKey(name, keyQueue,
+                      cacheSize - keyQueue.size());
+                }
+                if (isCanceled()) {
+                  keyQueue.clear();
+                }
+              } finally {
+                writeUnlock(keyName);
               }
             } catch (final Exception e) {
               throw new RuntimeException(e);
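
The striped read-write locks introduced above follow a common pattern: hash the key name into a small, fixed array of locks so that operations on different keys rarely contend, while reads and writes for the same key stay synchronized. A minimal standalone sketch of that pattern in Java (class and method names are illustrative, not taken from the patch):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative sketch of lock striping keyed by name; not Hadoop code.
public class StripedLocks {
  // Power-of-two size so (hashCode & MASK) maps evenly onto the array.
  private static final int LOCK_ARRAY_SIZE = 16;
  private static final int MASK = LOCK_ARRAY_SIZE - 1;

  private final List<ReadWriteLock> locks = new ArrayList<>(LOCK_ARRAY_SIZE);

  public StripedLocks() {
    for (int i = 0; i < LOCK_ARRAY_SIZE; i++) {
      locks.add(new ReentrantReadWriteLock());
    }
  }

  // Keys hashing to the same stripe share a lock; all others proceed in parallel.
  private ReadWriteLock lockFor(String keyName) {
    return locks.get(keyName.hashCode() & MASK);
  }

  public void withWriteLock(String keyName, Runnable action) {
    ReadWriteLock lock = lockFor(keyName);
    lock.writeLock().lock();
    try {
      action.run();
    } finally {
      lock.writeLock().unlock();
    }
  }
}

ValueQueue follows the same indexFor/getLock shape, taking the write lock around drain and refill and the read lock around getSize and poll.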

+ 5 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs;
 
 import java.io.IOException;
+import java.io.Serializable;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -29,7 +30,9 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class BlockLocation {
+public class BlockLocation implements Serializable {
+  private static final long serialVersionUID = 0x22986f6d;
+
   private String[] hosts; // Datanode hostnames
   private String[] cachedHosts; // Datanode hostnames with a cached replica
   private String[] names; // Datanode IP:xferPort for accessing the block
@@ -303,4 +306,4 @@ public class BlockLocation {
     }
     return result.toString();
   }
-}
+}

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -353,6 +353,17 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS =
     "hadoop.user.group.metrics.percentiles.intervals";
 
+  /* When creating UGI with UserGroupInformation(Subject), treat the passed
+   * subject external if set to true, and assume the owner of the subject
+   * should do the credential renewal.
+   *
+   * This is a temporary config to solve the compatibility issue with
+   * HADOOP-13558 and HADOOP-13805 fix, see the jiras for discussions.
+   */
+  public static final String HADOOP_TREAT_SUBJECT_EXTERNAL_KEY =
+      "hadoop.treat.subject.external";
+  public static final boolean HADOOP_TREAT_SUBJECT_EXTERNAL_DEFAULT = false;
+
   public static final String RPC_METRICS_QUANTILE_ENABLE =
       "rpc.metrics.quantile.enable";
   public static final boolean RPC_METRICS_QUANTILE_ENABLE_DEFAULT = false;

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java

@@ -48,4 +48,7 @@ public class FSExceptionMessages {
       = "Requested more bytes than destination buffer size";
 
   public static final String PERMISSION_DENIED = "Permission denied";
+
+  public static final String PERMISSION_DENIED_BY_STICKY_BIT =
+      "Permission denied by sticky bit";
 }

+ 19 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java

@@ -20,6 +20,9 @@ package org.apache.hadoop.fs;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.io.ObjectInputValidation;
+import java.io.Serializable;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -31,11 +34,14 @@ import org.apache.hadoop.io.Writable;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class FileStatus implements Writable, Comparable<FileStatus> {
+public class FileStatus implements Writable, Comparable<FileStatus>,
+    Serializable, ObjectInputValidation {
+
+  private static final long serialVersionUID = 0x13caeae8;
 
   private Path path;
   private long length;
-  private boolean isdir;
+  private Boolean isdir;
   private short block_replication;
   private long blocksize;
   private long modification_time;
@@ -387,4 +393,15 @@ public class FileStatus implements Writable, Comparable<FileStatus> {
     sb.append("}");
     return sb.toString();
   }
+
+  @Override
+  public void validateObject() throws InvalidObjectException {
+    if (null == path) {
+      throw new InvalidObjectException("No Path in deserialized FileStatus");
+    }
+    if (null == isdir) {
+      throw new InvalidObjectException("No type in deserialized FileStatus");
+    }
+  }
+
 }

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsCreateModes.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public final class FsCreateModes extends FsPermission {
+  private static final long serialVersionUID = 0x22986f6d;
   private final FsPermission unmasked;
 
   /**

+ 19 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java

@@ -20,6 +20,9 @@ package org.apache.hadoop.fs.permission;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.io.ObjectInputValidation;
+import java.io.Serializable;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -36,8 +39,10 @@ import org.apache.hadoop.io.WritableFactory;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class FsPermission implements Writable {
+public class FsPermission implements Writable, Serializable,
+    ObjectInputValidation {
   private static final Log LOG = LogFactory.getLog(FsPermission.class);
+  private static final long serialVersionUID = 0x2fe08564;
 
   static final WritableFactory FACTORY = new WritableFactory() {
     @Override
@@ -60,7 +65,7 @@ public class FsPermission implements Writable {
   private FsAction useraction = null;
   private FsAction groupaction = null;
   private FsAction otheraction = null;
-  private boolean stickyBit = false;
+  private Boolean stickyBit = false;
 
   private FsPermission() {}
 
@@ -202,7 +207,7 @@ public class FsPermission implements Writable {
       return this.useraction == that.useraction
           && this.groupaction == that.groupaction
           && this.otheraction == that.otheraction
-          && this.stickyBit == that.stickyBit;
+          && this.stickyBit.booleanValue() == that.stickyBit.booleanValue();
     }
     return false;
   }
@@ -377,6 +382,7 @@ public class FsPermission implements Writable {
   }
   
   private static class ImmutableFsPermission extends FsPermission {
+    private static final long serialVersionUID = 0x1bab54bd;
     public ImmutableFsPermission(short permission) {
       super(permission);
     }
@@ -386,4 +392,14 @@ public class FsPermission implements Writable {
       throw new UnsupportedOperationException();
     }
   }
+
+  @Override
+  public void validateObject() throws InvalidObjectException {
+    if (null == useraction || null == groupaction || null == otheraction) {
+      throw new InvalidObjectException("Invalid mode in FsPermission");
+    }
+    if (null == stickyBit) {
+      throw new InvalidObjectException("No sticky bit in FsPermission");
+    }
+  }
 }
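
FileStatus and FsPermission now implement java.io.ObjectInputValidation, but validateObject() only runs for instances that register themselves with the stream during deserialization. The usual wiring is sketched below with a hypothetical class; the readObject hooks of the Hadoop classes are not part of this diff, so this is illustrative only:

import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.ObjectInputStream;
import java.io.ObjectInputValidation;
import java.io.Serializable;

// Hypothetical example of the ObjectInputValidation pattern; not Hadoop code.
class ValidatedRecord implements Serializable, ObjectInputValidation {
  private static final long serialVersionUID = 1L;
  private String name;

  ValidatedRecord(String name) {
    this.name = name;
  }

  private void readObject(ObjectInputStream in)
      throws IOException, ClassNotFoundException {
    in.defaultReadObject();
    // Ask the stream to call validateObject() once the whole object graph is read.
    in.registerValidation(this, 0);
  }

  @Override
  public void validateObject() throws InvalidObjectException {
    if (name == null) {
      throw new InvalidObjectException("No name in deserialized record");
    }
  }
}

FileStatus.validateObject() and FsPermission.validateObject() above perform the same kind of null checks, which is why fields such as isdir and stickyBit were widened to nullable wrapper types.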

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java

@@ -33,7 +33,7 @@ import org.apache.hadoop.fs.FileStatus;
  * Format sequences:<br>
  *   %a: Permissions in octal<br>
  *   %A: Permissions in symbolic style<br>
- *   %b: Size of file in blocks<br>
+ *   %b: Size of file in bytes<br>
  *   %F: Type<br>
  *   %g: Group name of owner<br>
  *   %n: Filename<br>
@@ -60,7 +60,7 @@ class Stat extends FsCommand {
     "Print statistics about the file/directory at <path>" + NEWLINE +
     "in the specified format. Format accepts permissions in" + NEWLINE +
     "octal (%a) and symbolic (%A), filesize in" + NEWLINE +
-    "blocks (%b), type (%F), group name of owner (%g)," + NEWLINE +
+    "bytes (%b), type (%F), group name of owner (%g)," + NEWLINE +
     "name (%n), block size (%o), replication (%r), user name" + NEWLINE +
     "of owner (%u), modification date (%y, %Y)." + NEWLINE +
     "%y shows UTC date as \"yyyy-MM-dd HH:mm:ss\" and" + NEWLINE +

+ 34 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -346,8 +346,13 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         createWithRetries(prefixPath, new byte[]{}, zkAcl, CreateMode.PERSISTENT);
       } catch (KeeperException e) {
         if (isNodeExists(e.code())) {
-          // This is OK - just ensuring existence.
-          continue;
+          // Set ACLs for parent node, if they do not exist or are different
+          try {
+            setAclsWithRetries(prefixPath);
+          } catch (KeeperException e1) {
+            throw new IOException("Couldn't set ACLs on parent ZNode: " +
+                prefixPath, e1);
+          }
         } else {
           throw new IOException("Couldn't create " + prefixPath, e);
         }
@@ -1066,14 +1071,36 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     });
   }
 
+  private void setAclsWithRetries(final String path)
+      throws KeeperException, InterruptedException {
+    Stat stat = new Stat();
+    zkDoWithRetries(new ZKAction<Void>() {
+      @Override
+      public Void run() throws KeeperException, InterruptedException {
+        List<ACL> acl = zkClient.getACL(path, stat);
+        if (acl == null || !acl.containsAll(zkAcl) ||
+            !zkAcl.containsAll(acl)) {
+          zkClient.setACL(path, zkAcl, stat.getVersion());
+        }
+        return null;
+      }
+    }, Code.BADVERSION);
+  }
+
   private <T> T zkDoWithRetries(ZKAction<T> action) throws KeeperException,
       InterruptedException {
+    return zkDoWithRetries(action, null);
+  }
+
+  private <T> T zkDoWithRetries(ZKAction<T> action, Code retryCode)
+      throws KeeperException, InterruptedException {
     int retry = 0;
     while (true) {
       try {
         return action.run();
       } catch (KeeperException ke) {
-        if (shouldRetry(ke.code()) && ++retry < maxRetryNum) {
+        if ((shouldRetry(ke.code()) || shouldRetry(ke.code(), retryCode))
+            && ++retry < maxRetryNum) {
           continue;
         }
         throw ke;
@@ -1189,6 +1216,10 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   private static boolean shouldRetry(Code code) {
     return code == Code.CONNECTIONLOSS || code == Code.OPERATIONTIMEOUT;
   }
+
+  private static boolean shouldRetry(Code code, Code retryIfCode) {
+    return (retryIfCode == null ? false : retryIfCode == code);
+  }
   
   @Override
   public String toString() {

+ 100 - 19
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

@@ -60,6 +60,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.ConfServlet;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
@@ -151,6 +152,7 @@ public final class HttpServer2 implements FilterContainer {
 
   protected final WebAppContext webAppContext;
   protected final boolean findPort;
+  protected final IntegerRanges portRanges;
   private final Map<ServletContextHandler, Boolean> defaultContexts =
       new HashMap<>();
   protected final List<String> filterNames = new ArrayList<>();
@@ -189,6 +191,7 @@ public final class HttpServer2 implements FilterContainer {
     private String keyPassword;
 
     private boolean findPort;
+    private IntegerRanges portRanges = null;
 
     private String hostName;
     private boolean disallowFallbackToRandomSignerSecretProvider;
@@ -261,6 +264,11 @@ public final class HttpServer2 implements FilterContainer {
       return this;
     }
 
+    public Builder setPortRanges(IntegerRanges ranges) {
+      this.portRanges = ranges;
+      return this;
+    }
+
     public Builder setConf(Configuration conf) {
       this.conf = conf;
       return this;
@@ -496,6 +504,7 @@ public final class HttpServer2 implements FilterContainer {
     }
 
     this.findPort = b.findPort;
+    this.portRanges = b.portRanges;
     initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs);
   }
 
@@ -1079,6 +1088,93 @@ public final class HttpServer2 implements FilterContainer {
     }
   }
 
+  /**
+   * Bind listener by closing and opening the listener.
+   * @param listener
+   * @throws Exception
+   */
+  private static void bindListener(ServerConnector listener) throws Exception {
+    // jetty has a bug where you can't reopen a listener that previously
+    // failed to open w/o issuing a close first, even if the port is changed
+    listener.close();
+    listener.open();
+    LOG.info("Jetty bound to port " + listener.getLocalPort());
+  }
+
+  /**
+   * Create bind exception by wrapping the bind exception thrown.
+   * @param listener
+   * @param ex
+   * @return
+   */
+  private static BindException constructBindException(ServerConnector listener,
+      BindException ex) {
+    BindException be = new BindException("Port in use: "
+        + listener.getHost() + ":" + listener.getPort());
+    if (ex != null) {
+      be.initCause(ex);
+    }
+    return be;
+  }
+
+  /**
+   * Bind using single configured port. If findPort is true, we will try to bind
+   * after incrementing port till a free port is found.
+   * @param listener jetty listener.
+   * @param port port which is set in the listener.
+   * @throws Exception
+   */
+  private void bindForSinglePort(ServerConnector listener, int port)
+      throws Exception {
+    while (true) {
+      try {
+        bindListener(listener);
+        break;
+      } catch (BindException ex) {
+        if (port == 0 || !findPort) {
+          throw constructBindException(listener, ex);
+        }
+      }
+      // try the next port number
+      listener.setPort(++port);
+      Thread.sleep(100);
+    }
+  }
+
+  /**
+   * Bind using port ranges. Keep on looking for a free port in the port range
+   * and throw a bind exception if no port in the configured range binds.
+   * @param listener jetty listener.
+   * @param startPort initial port which is set in the listener.
+   * @throws Exception
+   */
+  private void bindForPortRange(ServerConnector listener, int startPort)
+      throws Exception {
+    BindException bindException = null;
+    try {
+      bindListener(listener);
+      return;
+    } catch (BindException ex) {
+      // Ignore exception.
+      bindException = ex;
+    }
+    for(Integer port : portRanges) {
+      if (port == startPort) {
+        continue;
+      }
+      Thread.sleep(100);
+      listener.setPort(port);
+      try {
+        bindListener(listener);
+        return;
+      } catch (BindException ex) {
+        // Ignore exception. Move to next port.
+        bindException = ex;
+      }
+    }
+    throw constructBindException(listener, bindException);
+  }
+
   /**
    * Open the main listener for the server
    * @throws Exception
@@ -1091,25 +1187,10 @@ public final class HttpServer2 implements FilterContainer {
         continue;
       }
       int port = listener.getPort();
-      while (true) {
-        // jetty has a bug where you can't reopen a listener that previously
-        // failed to open w/o issuing a close first, even if the port is changed
-        try {
-          listener.close();
-          listener.open();
-          LOG.info("Jetty bound to port " + listener.getLocalPort());
-          break;
-        } catch (BindException ex) {
-          if (port == 0 || !findPort) {
-            BindException be = new BindException("Port in use: "
-                + listener.getHost() + ":" + listener.getPort());
-            be.initCause(ex);
-            throw be;
-          }
-        }
-        // try the next port number
-        listener.setPort(++port);
-        Thread.sleep(100);
+      if (portRanges != null && port != 0) {
+        bindForPortRange(listener, port);
+      } else {
+        bindForSinglePort(listener, port);
       }
     }
   }
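
bindForPortRange above walks the configured IntegerRanges until one port binds, and otherwise throws a BindException wrapping the last failure. The same idea in isolation, using Configuration.getRange with a plain ServerSocket instead of the Jetty connector (the property name here is made up for the example):

import java.io.IOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

import org.apache.hadoop.conf.Configuration;

// Illustrative sketch: try each port in a configured range until one binds.
public class PortRangeBindExample {
  public static ServerSocket bindInRange(Configuration conf) throws IOException {
    // "my.service.port.range" is a hypothetical key, e.g. set to "50100-50105".
    Configuration.IntegerRanges ports =
        conf.getRange("my.service.port.range", "50100-50105");
    BindException last = null;
    for (Integer port : ports) {
      ServerSocket socket = new ServerSocket();
      try {
        socket.bind(new InetSocketAddress(port));
        return socket;          // first free port in the range wins
      } catch (BindException e) {
        socket.close();         // port busy; remember the failure and move on
        last = e;
      }
    }
    if (last == null) {
      last = new BindException("No port in the configured range");
    }
    throw last;
  }
}

HttpServer2 applies the same loop to its ServerConnector, sleeping briefly between attempts and skipping the port the connector already tried as its initial port.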

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java

@@ -130,7 +130,9 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
           Thread.sleep(retryInfo.delay);
         } catch (InterruptedException e) {
           Thread.currentThread().interrupt();
-          LOG.warn("Interrupted while waiting to retry", e);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Interrupted while waiting to retry", e);
+          }
           InterruptedIOException intIOE = new InterruptedIOException(
               "Retry interrupted");
           intIOE.initCause(e);
@@ -375,7 +377,7 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
     }
 
     final StringBuilder b = new StringBuilder()
-        .append("Exception while invoking ")
+        .append(ex + ", while invoking ")
         .append(proxyDescriptor.getProxyInfo().getString(method.getName()));
     if (failovers > 0) {
       b.append(" after ").append(failovers).append(" failover attempts");
@@ -384,7 +386,7 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
     b.append(delay > 0? "after sleeping for " + delay + "ms.": "immediately.");
 
     if (info) {
-      LOG.info(b.toString(), ex);
+      LOG.info(b.toString());
     } else {
       LOG.debug(b.toString(), ex);
     }

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -910,8 +910,10 @@ public class Client implements AutoCloseable {
       }
       if (action.action == RetryAction.RetryDecision.FAIL) {
         if (action.reason != null) {
-          LOG.warn("Failed to connect to server: " + server + ": "
-              + action.reason, ioe);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Failed to connect to server: " + server + ": "
+                    + action.reason, ioe);
+          }
         }
         throw ioe;
       }

+ 51 - 114
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java

@@ -27,8 +27,7 @@ import java.util.AbstractQueue;
 import java.util.HashMap;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.concurrent.locks.Condition;
+import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -55,16 +54,15 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
   /* The queues */
   private final ArrayList<BlockingQueue<E>> queues;
 
-  /* Read locks */
-  private final ReentrantLock takeLock = new ReentrantLock();
-  private final Condition notEmpty = takeLock.newCondition();
+  /* Track available permits for scheduled objects.  All methods that will
+   * mutate a subqueue must acquire or release a permit on the semaphore.
+   * A semaphore is much faster than an exclusive lock because producers do
+   * not contend with consumers and consumers do not block other consumers
+   * while polling.
+   */
+  private final Semaphore semaphore = new Semaphore(0);
   private void signalNotEmpty() {
-    takeLock.lock();
-    try {
-      notEmpty.signal();
-    } finally {
-      takeLock.unlock();
-    }
+    semaphore.release();
   }
 
   /* Multiplexer picks which queue to draw from */
@@ -112,26 +110,25 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
   }
 
   /**
-   * Returns the first non-empty queue with equal or lesser priority
-   * than <i>startIdx</i>. Wraps around, searching a maximum of N
-   * queues, where N is this.queues.size().
+   * Returns an element from the first non-empty queue, starting with the
+   * priority selected by the multiplexer and otherwise scanning from the
+   * highest to the lowest priority queue.
+   *
+   * Caller must always acquire a semaphore permit before invoking.
    *
-   * @param startIdx the queue number to start searching at
-   * @return the first non-empty queue with less priority, or null if
-   * everything was empty
+   * @return an element from a non-empty sub-queue; never null, since the
+   * caller is required to hold a permit
    */
-  private BlockingQueue<E> getFirstNonEmptyQueue(int startIdx) {
-    final int numQueues = this.queues.size();
-    for(int i=0; i < numQueues; i++) {
-      int idx = (i + startIdx) % numQueues; // offset and wrap around
-      BlockingQueue<E> queue = this.queues.get(idx);
-      if (queue.size() != 0) {
-        return queue;
+  private E removeNextElement() {
+    int priority = multiplexer.getAndAdvanceCurrentIndex();
+    E e = queues.get(priority).poll();
+    if (e == null) {
+      for (int idx = 0; e == null && idx < queues.size(); idx++) {
+        e = queues.get(idx).poll();
       }
     }
-
-    // All queues were empty
-    return null;
+    // guaranteed to find an element if caller acquired permit.
+    assert e != null : "consumer didn't acquire semaphore!";
+    return e;
   }
 
   /* AbstractQueue and BlockingQueue methods */
@@ -182,9 +179,9 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
     int priorityLevel = e.getPriorityLevel();
     BlockingQueue<E> q = this.queues.get(priorityLevel);
     boolean ret = q.offer(e, timeout, unit);
-
-    signalNotEmpty();
-
+    if (ret) {
+      signalNotEmpty();
+    }
     return ret;
   }
 
@@ -193,72 +190,21 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
     int priorityLevel = e.getPriorityLevel();
     BlockingQueue<E> q = this.queues.get(priorityLevel);
     boolean ret = q.offer(e);
-
-    signalNotEmpty();
-
+    if (ret) {
+      signalNotEmpty();
+    }
     return ret;
   }
 
   @Override
   public E take() throws InterruptedException {
-    int startIdx = this.multiplexer.getAndAdvanceCurrentIndex();
-
-    takeLock.lockInterruptibly();
-    try {
-      // Wait while queue is empty
-      for (;;) {
-        BlockingQueue<E> q = this.getFirstNonEmptyQueue(startIdx);
-        if (q != null) {
-          // Got queue, so return if we can poll out an object
-          E e = q.poll();
-          if (e != null) {
-            return e;
-          }
-        }
-
-        notEmpty.await();
-      }
-    } finally {
-      takeLock.unlock();
-    }
+    semaphore.acquire();
+    return removeNextElement();
   }
 
   @Override
-  public E poll(long timeout, TimeUnit unit)
-      throws InterruptedException {
-
-    int startIdx = this.multiplexer.getAndAdvanceCurrentIndex();
-
-    long nanos = unit.toNanos(timeout);
-    takeLock.lockInterruptibly();
-    try {
-      for (;;) {
-        BlockingQueue<E> q = this.getFirstNonEmptyQueue(startIdx);
-        if (q != null) {
-          E e = q.poll();
-          if (e != null) {
-            // Escape condition: there might be something available
-            return e;
-          }
-        }
-
-        if (nanos <= 0) {
-          // Wait has elapsed
-          return null;
-        }
-
-        try {
-          // Now wait on the condition for a bit. If we get
-          // spuriously awoken we'll re-loop
-          nanos = notEmpty.awaitNanos(nanos);
-        } catch (InterruptedException ie) {
-          notEmpty.signal(); // propagate to a non-interrupted thread
-          throw ie;
-        }
-      }
-    } finally {
-      takeLock.unlock();
-    }
+  public E poll(long timeout, TimeUnit unit) throws InterruptedException {
+    return semaphore.tryAcquire(timeout, unit) ? removeNextElement() : null;
   }
 
   /**
@@ -267,15 +213,7 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
    */
   @Override
   public E poll() {
-    int startIdx = this.multiplexer.getAndAdvanceCurrentIndex();
-
-    BlockingQueue<E> q = this.getFirstNonEmptyQueue(startIdx);
-    if (q == null) {
-      return null; // everything is empty
-    }
-
-    // Delegate to the sub-queue's poll, which could still return null
-    return q.poll();
+    return semaphore.tryAcquire() ? removeNextElement() : null;
   }
 
   /**
@@ -283,12 +221,11 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
    */
   @Override
   public E peek() {
-    BlockingQueue<E> q = this.getFirstNonEmptyQueue(0);
-    if (q == null) {
-      return null;
-    } else {
-      return q.peek();
+    E e = null;
+    for (int i=0; e == null && i < queues.size(); i++) {
+      e = queues.get(i).peek();
     }
+    return e;
   }
 
   /**
@@ -299,11 +236,7 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
    */
   @Override
   public int size() {
-    int size = 0;
-    for (BlockingQueue<E> q : this.queues) {
-      size += q.size();
-    }
-    return size;
+    return semaphore.availablePermits();
   }
 
   /**
@@ -322,20 +255,24 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
    */
   @Override
   public int drainTo(Collection<? super E> c, int maxElements) {
-    int sum = 0;
-    for (BlockingQueue<E> q : this.queues) {
-      sum += q.drainTo(c, maxElements);
+    // initially take all permits to stop consumers from modifying queues
+    // while draining.  will restore any excess when done draining.
+    final int permits = semaphore.drainPermits();
+    final int numElements = Math.min(maxElements, permits);
+    int numRemaining = numElements;
+    for (int i=0; numRemaining > 0 && i < queues.size(); i++) {
+      numRemaining -= queues.get(i).drainTo(c, numRemaining);
     }
-    return sum;
+    int drained = numElements - numRemaining;
+    if (permits > drained) { // restore unused permits.
+      semaphore.release(permits - drained);
+    }
+    return drained;
   }
 
   @Override
   public int drainTo(Collection<? super E> c) {
-    int sum = 0;
-    for (BlockingQueue<E> q : this.queues) {
-      sum += q.drainTo(c);
-    }
-    return sum;
+    return drainTo(c, Integer.MAX_VALUE);
   }
 
   /**

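The rewrite replaces the take-lock/condition pair with a counting semaphore: every successful offer releases one permit and every consumer acquires a permit before calling removeNextElement(), so a held permit guarantees that some sub-queue is non-empty, and drainTo() preserves the same invariant by draining permits before draining elements. A much-simplified, self-contained sketch of that permit-per-element pattern (hypothetical class; the multiplexer-driven starting index that gives FairCallQueue its fairness is omitted here):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

// Illustrative only: one permit is released per enqueued element and one is
// acquired per removal, so a held permit proves an element exists somewhere.
public class FairQueueSketch<E> {
  private final List<LinkedBlockingQueue<E>> queues = new ArrayList<>();
  private final Semaphore permits = new Semaphore(0);

  public FairQueueSketch(int levels) {
    for (int i = 0; i < levels; i++) {
      queues.add(new LinkedBlockingQueue<E>());
    }
  }

  public boolean offer(int level, E e) {
    boolean added = queues.get(level).offer(e);
    if (added) {
      permits.release();               // one permit per enqueued element
    }
    return added;
  }

  public E take() throws InterruptedException {
    permits.acquire();                 // blocks until some sub-queue is non-empty
    return removeNext();
  }

  public E poll(long timeout, TimeUnit unit) throws InterruptedException {
    return permits.tryAcquire(timeout, unit) ? removeNext() : null;
  }

  public int size() {
    return permits.availablePermits(); // mirrors the size() change above
  }

  private E removeNext() {
    for (LinkedBlockingQueue<E> q : queues) {   // highest priority first
      E e = q.poll();
      if (e != null) {
        return e;
      }
    }
    throw new IllegalStateException("permit held but all sub-queues empty");
  }

  public static void main(String[] args) throws InterruptedException {
    FairQueueSketch<String> q = new FairQueueSketch<>(2);
    q.offer(1, "low-priority call");
    q.offer(0, "high-priority call");
    System.out.println(q.take());      // high-priority call
    System.out.println(q.take());      // low-priority call
  }
}
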
+ 108 - 98
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -1241,20 +1241,16 @@ public abstract class Server {
         LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo);
         throw ieo;
       } catch (Exception e) {
-        // Do not log WrappedRpcServerExceptionSuppressed.
-        if (!(e instanceof WrappedRpcServerExceptionSuppressed)) {
-          // A WrappedRpcServerException is an exception that has been sent
-          // to the client, so the stacktrace is unnecessary; any other
-          // exceptions are unexpected internal server errors and thus the
-          // stacktrace should be logged.
-          LOG.info(Thread.currentThread().getName() +
-              ": readAndProcess from client " + c.getHostAddress() +
-              " threw exception [" + e + "]",
-              (e instanceof WrappedRpcServerException) ? null : e);
-        }
+        // Any exceptions that reach here are fatal unexpected internal errors
+        // that could not be sent to the client.
+        LOG.info(Thread.currentThread().getName() +
+            ": readAndProcess from client " + c +
+            " threw exception [" + e + "]", e);
         count = -1; //so that the (count < 0) block is executed
       }
-      if (count < 0) {
+      // setupResponse will signal the connection should be closed when a
+      // fatal response is sent.
+      if (count < 0 || c.shouldClose()) {
         closeConnection(c);
         c = null;
       }
@@ -1582,16 +1578,20 @@ public abstract class Server {
    * unnecessary stack trace logging if it's not an internal server error. 
    */
   @SuppressWarnings("serial")
-  private static class WrappedRpcServerException extends RpcServerException {
+  private static class FatalRpcServerException extends RpcServerException {
     private final RpcErrorCodeProto errCode;
-    public WrappedRpcServerException(RpcErrorCodeProto errCode, IOException ioe) {
+    public FatalRpcServerException(RpcErrorCodeProto errCode, IOException ioe) {
       super(ioe.toString(), ioe);
       this.errCode = errCode;
     }
-    public WrappedRpcServerException(RpcErrorCodeProto errCode, String message) {
+    public FatalRpcServerException(RpcErrorCodeProto errCode, String message) {
       this(errCode, new RpcServerException(message));
     }
     @Override
+    public RpcStatusProto getRpcStatusProto() {
+      return RpcStatusProto.FATAL;
+    }
+    @Override
     public RpcErrorCodeProto getRpcErrorCodeProto() {
       return errCode;
     }
@@ -1601,19 +1601,6 @@ public abstract class Server {
     }
   }
 
-  /**
-   * A WrappedRpcServerException that is suppressed altogether
-   * for the purposes of logging.
-   */
-  @SuppressWarnings("serial")
-  private static class WrappedRpcServerExceptionSuppressed
-      extends WrappedRpcServerException {
-    public WrappedRpcServerExceptionSuppressed(
-        RpcErrorCodeProto errCode, IOException ioe) {
-      super(errCode, ioe);
-    }
-  }
-
   /** Reads calls from a connection and queues them for handling. */
   public class Connection {
     private boolean connectionHeaderRead = false; // connection  header is read?
@@ -1645,7 +1632,8 @@ public abstract class Server {
     private ByteBuffer unwrappedData;
     private ByteBuffer unwrappedDataLengthBuffer;
     private int serviceClass;
-    
+    private boolean shouldClose = false;
+
     UserGroupInformation user = null;
     public UserGroupInformation attemptingUser = null; // user name before auth
 
@@ -1689,7 +1677,15 @@ public abstract class Server {
     public String toString() {
       return getHostAddress() + ":" + remotePort; 
     }
-    
+
+    boolean setShouldClose() {
+      return shouldClose = true;
+    }
+
+    boolean shouldClose() {
+      return shouldClose;
+    }
+
     public String getHostAddress() {
       return hostAddress;
     }
@@ -1743,13 +1739,13 @@ public abstract class Server {
     }
 
     private void saslReadAndProcess(RpcWritable.Buffer buffer) throws
-    WrappedRpcServerException, IOException, InterruptedException {
+        RpcServerException, IOException, InterruptedException {
       final RpcSaslProto saslMessage =
           getMessage(RpcSaslProto.getDefaultInstance(), buffer);
       switch (saslMessage.getState()) {
         case WRAP: {
           if (!saslContextEstablished || !useWrap) {
-            throw new WrappedRpcServerException(
+            throw new FatalRpcServerException(
                 RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
                 new SaslException("Server is not wrapping data"));
           }
@@ -1797,7 +1793,7 @@ public abstract class Server {
     /**
      * Process saslMessage and send saslResponse back
      * @param saslMessage received SASL message
-     * @throws WrappedRpcServerException setup failed due to SASL negotiation 
+     * @throws RpcServerException setup failed due to SASL negotiation
      *         failure, premature or invalid connection context, or other state 
      *         errors. This exception needs to be sent to the client. This 
      *         exception will wrap {@link RetriableException}, 
@@ -1807,9 +1803,9 @@ public abstract class Server {
      * @throws InterruptedException
      */
     private void saslProcess(RpcSaslProto saslMessage)
-        throws WrappedRpcServerException, IOException, InterruptedException {
+        throws RpcServerException, IOException, InterruptedException {
       if (saslContextEstablished) {
-        throw new WrappedRpcServerException(
+        throw new FatalRpcServerException(
             RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
             new SaslException("Negotiation is already complete"));
       }
@@ -1843,10 +1839,10 @@ public abstract class Server {
           AUDITLOG.info(AUTH_SUCCESSFUL_FOR + user);
           saslContextEstablished = true;
         }
-      } catch (WrappedRpcServerException wrse) { // don't re-wrap
-        throw wrse;
+      } catch (RpcServerException rse) { // don't re-wrap
+        throw rse;
       } catch (IOException ioe) {
-        throw new WrappedRpcServerException(
+        throw new FatalRpcServerException(
             RpcErrorCodeProto.FATAL_UNAUTHORIZED, ioe);
       }
       // send back response if any, may throw IOException
@@ -1977,14 +1973,14 @@ public abstract class Server {
       setupResponse(saslCall,
           RpcStatusProto.SUCCESS, null,
           RpcWritable.wrap(message), null, null);
-      saslCall.sendResponse();
+      sendResponse(saslCall);
     }
 
     private void doSaslReply(Exception ioe) throws IOException {
       setupResponse(authFailedCall,
           RpcStatusProto.FATAL, RpcErrorCodeProto.FATAL_UNAUTHORIZED,
           null, ioe.getClass().getName(), ioe.getLocalizedMessage());
-      authFailedCall.sendResponse();
+      sendResponse(authFailedCall);
     }
 
     private void disposeSasl() {
@@ -2026,16 +2022,12 @@ public abstract class Server {
      * rpc request length.
      *    
      * @return -1 in case of error, else num bytes read so far
-     * @throws WrappedRpcServerException - an exception that has already been 
-     *         sent back to the client that does not require verbose logging
-     *         by the Listener thread
      * @throws IOException - internal error that should not be returned to
      *         client, typically failure to respond to client
      * @throws InterruptedException
      */
-    public int readAndProcess()
-        throws WrappedRpcServerException, IOException, InterruptedException {
-      while (true) {
+    public int readAndProcess() throws IOException, InterruptedException {
+      while (!shouldClose()) { // stop if a fatal response has been sent.
         // dataLengthBuffer is used to read "hrpc" or the rpc-packet length
         int count = -1;
         if (dataLengthBuffer.remaining() > 0) {
@@ -2101,9 +2093,10 @@ public abstract class Server {
         if (data.remaining() == 0) {
           dataLengthBuffer.clear(); // to read length of future rpc packets
           data.flip();
+          ByteBuffer requestData = data;
+          data = null; // null out in case processOneRpc throws.
           boolean isHeaderRead = connectionContextRead;
-          processOneRpc(data);
-          data = null;
+          processOneRpc(requestData);
           // the last rpc-request we processed could have simply been the
           // connectionContext; if so continue to read the first RPC.
           if (!isHeaderRead) {
@@ -2112,6 +2105,7 @@ public abstract class Server {
         } 
         return count;
       }
+      return -1;
     }
 
     private AuthProtocol initializeAuthContext(int authType)
@@ -2194,14 +2188,14 @@ public abstract class Server {
         setupResponse(fakeCall,
             RpcStatusProto.FATAL, RpcErrorCodeProto.FATAL_VERSION_MISMATCH,
             null, VersionMismatch.class.getName(), errMsg);
-        fakeCall.sendResponse();
+        sendResponse(fakeCall);
       } else if (clientVersion >= 3) {
         RpcCall fakeCall = new RpcCall(this, -1);
         // Versions 3 to 8 use older response
         setupResponseOldVersionFatal(buffer, fakeCall,
             null, VersionMismatch.class.getName(), errMsg);
 
-        fakeCall.sendResponse();
+        sendResponse(fakeCall);
       } else if (clientVersion == 2) { // Hadoop 0.18.3
         RpcCall fakeCall = new RpcCall(this, 0);
         DataOutputStream out = new DataOutputStream(buffer);
@@ -2210,7 +2204,7 @@ public abstract class Server {
         WritableUtils.writeString(out, VersionMismatch.class.getName());
         WritableUtils.writeString(out, errMsg);
         fakeCall.setResponse(ByteBuffer.wrap(buffer.toByteArray()));
-        fakeCall.sendResponse();
+        sendResponse(fakeCall);
       }
     }
     
@@ -2218,18 +2212,18 @@ public abstract class Server {
       RpcCall fakeCall = new RpcCall(this, 0);
       fakeCall.setResponse(ByteBuffer.wrap(
           RECEIVED_HTTP_REQ_RESPONSE.getBytes(StandardCharsets.UTF_8)));
-      fakeCall.sendResponse();
+      sendResponse(fakeCall);
     }
 
     /** Reads the connection context following the connection header
-     * @throws WrappedRpcServerException - if the header cannot be
+     * @throws RpcServerException - if the header cannot be
      *         deserialized, or the user is not authorized
      */ 
     private void processConnectionContext(RpcWritable.Buffer buffer)
-        throws WrappedRpcServerException {
+        throws RpcServerException {
       // allow only one connection context during a session
       if (connectionContextRead) {
-        throw new WrappedRpcServerException(
+        throw new FatalRpcServerException(
             RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
             "Connection context already processed");
       }
@@ -2250,7 +2244,7 @@ public abstract class Server {
             && (!protocolUser.getUserName().equals(user.getUserName()))) {
           if (authMethod == AuthMethod.TOKEN) {
             // Not allowed to doAs if token authentication is used
-            throw new WrappedRpcServerException(
+            throw new FatalRpcServerException(
                 RpcErrorCodeProto.FATAL_UNAUTHORIZED,
                 new AccessControlException("Authenticated user (" + user
                     + ") doesn't match what the client claims to be ("
@@ -2278,13 +2272,10 @@ public abstract class Server {
      * each embedded RPC request 
      * @param inBuf - SASL wrapped request of one or more RPCs
      * @throws IOException - SASL packet cannot be unwrapped
-     * @throws WrappedRpcServerException - an exception that has already been 
-     *         sent back to the client that does not require verbose logging
-     *         by the Listener thread
      * @throws InterruptedException
      */    
     private void unwrapPacketAndProcessRpcs(byte[] inBuf)
-        throws WrappedRpcServerException, IOException, InterruptedException {
+        throws IOException, InterruptedException {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Have read input token of size " + inBuf.length
             + " for processing by saslServer.unwrap()");
@@ -2293,7 +2284,7 @@ public abstract class Server {
       ReadableByteChannel ch = Channels.newChannel(new ByteArrayInputStream(
           inBuf));
       // Read all RPCs contained in the inBuf, even partial ones
-      while (true) {
+      while (!shouldClose()) { // stop if a fatal response has been sent.
         int count = -1;
         if (unwrappedDataLengthBuffer.remaining() > 0) {
           count = channelRead(ch, unwrappedDataLengthBuffer);
@@ -2314,8 +2305,9 @@ public abstract class Server {
         if (unwrappedData.remaining() == 0) {
           unwrappedDataLengthBuffer.clear();
           unwrappedData.flip();
-          processOneRpc(unwrappedData);
-          unwrappedData = null;
+          ByteBuffer requestData = unwrappedData;
+          unwrappedData = null; // null out in case processOneRpc throws.
+          processOneRpc(requestData);
         }
       }
     }
@@ -2334,13 +2326,13 @@ public abstract class Server {
      * @param bb - contains the RPC request header and the rpc request
      * @throws IOException - internal error that should not be returned to
      *         client, typically failure to respond to client
-     * @throws WrappedRpcServerException - an exception that is sent back to the
-     *         client in this method and does not require verbose logging by the
-     *         Listener thread
      * @throws InterruptedException
      */
     private void processOneRpc(ByteBuffer bb)
-        throws IOException, WrappedRpcServerException, InterruptedException {
+        throws IOException, InterruptedException {
+      // exceptions that escape this method are fatal to the connection.
+      // setupResponse will use the rpc status to determine if the connection
+      // should be closed.
       int callId = -1;
       int retry = RpcConstants.INVALID_RETRY_COUNT;
       try {
@@ -2357,40 +2349,47 @@ public abstract class Server {
         if (callId < 0) { // callIds typically used during connection setup
           processRpcOutOfBandRequest(header, buffer);
         } else if (!connectionContextRead) {
-          throw new WrappedRpcServerException(
+          throw new FatalRpcServerException(
               RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
               "Connection context not established");
         } else {
           processRpcRequest(header, buffer);
         }
-      } catch (WrappedRpcServerException wrse) { // inform client of error
-        Throwable ioe = wrse.getCause();
+      } catch (RpcServerException rse) {
+        // inform client of error, but do not rethrow else non-fatal
+        // exceptions will close connection!
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(Thread.currentThread().getName() +
+              ": processOneRpc from client " + this +
+              " threw exception [" + rse + "]");
+        }
+        // use the wrapped exception if there is one.
+        Throwable t = (rse.getCause() != null) ? rse.getCause() : rse;
         final RpcCall call = new RpcCall(this, callId, retry);
         setupResponse(call,
-            RpcStatusProto.FATAL, wrse.getRpcErrorCodeProto(), null,
-            ioe.getClass().getName(), ioe.getMessage());
-        call.sendResponse();
-        throw wrse;
+            rse.getRpcStatusProto(), rse.getRpcErrorCodeProto(), null,
+            t.getClass().getName(), t.getMessage());
+        sendResponse(call);
       }
     }
 
     /**
      * Verify RPC header is valid
      * @param header - RPC request header
-     * @throws WrappedRpcServerException - header contains invalid values 
+     * @throws RpcServerException - header contains invalid values
      */
     private void checkRpcHeaders(RpcRequestHeaderProto header)
-        throws WrappedRpcServerException {
+        throws RpcServerException {
       if (!header.hasRpcOp()) {
         String err = " IPC Server: No rpc op in rpcRequestHeader";
-        throw new WrappedRpcServerException(
+        throw new FatalRpcServerException(
             RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER, err);
       }
       if (header.getRpcOp() != 
           RpcRequestHeaderProto.OperationProto.RPC_FINAL_PACKET) {
         String err = "IPC Server does not implement rpc header operation" + 
                 header.getRpcOp();
-        throw new WrappedRpcServerException(
+        throw new FatalRpcServerException(
             RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER, err);
       }
       // If we know the rpc kind, get its class so that we can deserialize
@@ -2398,7 +2397,7 @@ public abstract class Server {
       // we continue with this original design.
       if (!header.hasRpcKind()) {
         String err = " IPC Server: No rpc kind in rpcRequestHeader";
-        throw new WrappedRpcServerException(
+        throw new FatalRpcServerException(
             RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER, err);
       }
     }
@@ -2411,13 +2410,15 @@ public abstract class Server {
      *     its response will be sent later when the request is processed.
      * @param header - RPC request header
      * @param buffer - stream to request payload
-     * @throws WrappedRpcServerException - due to fatal rpc layer issues such
-     *   as invalid header or deserialization error. In this case a RPC fatal
-     *   status response will later be sent back to client.
+     * @throws RpcServerException - generally due to fatal rpc layer issues
+     *   such as invalid header or deserialization error.  The call queue
+     *   may also throw a fatal or non-fatal exception on overflow.
+     * @throws IOException - fatal internal error that should/could not
+     *   be sent to client.
      * @throws InterruptedException
      */
     private void processRpcRequest(RpcRequestHeaderProto header,
-        RpcWritable.Buffer buffer) throws WrappedRpcServerException,
+        RpcWritable.Buffer buffer) throws RpcServerException,
         InterruptedException {
       Class<? extends Writable> rpcRequestClass = 
           getRpcRequestWrapper(header.getRpcKind());
@@ -2426,18 +2427,20 @@ public abstract class Server {
             " from client " + getHostAddress());
         final String err = "Unknown rpc kind in rpc header"  + 
             header.getRpcKind();
-        throw new WrappedRpcServerException(
-            RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER, err);   
+        throw new FatalRpcServerException(
+            RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER, err);
       }
       Writable rpcRequest;
       try { //Read the rpc request
         rpcRequest = buffer.newInstance(rpcRequestClass, conf);
+      } catch (RpcServerException rse) { // lets tests inject failures.
+        throw rse;
       } catch (Throwable t) { // includes runtime exception from newInstance
         LOG.warn("Unable to read call parameters for client " +
                  getHostAddress() + "on connection protocol " +
             this.protocolName + " for rpcKind " + header.getRpcKind(),  t);
         String err = "IPC server unable to read call parameters: "+ t.getMessage();
-        throw new WrappedRpcServerException(
+        throw new FatalRpcServerException(
             RpcErrorCodeProto.FATAL_DESERIALIZING_REQUEST, err);
       }
         
@@ -2476,7 +2479,7 @@ public abstract class Server {
       try {
         queueCall(call);
       } catch (IOException ioe) {
-        throw new WrappedRpcServerException(
+        throw new FatalRpcServerException(
             RpcErrorCodeProto.ERROR_RPC_SERVER, ioe);
       }
       incRpcCount();  // Increment the rpc count
@@ -2487,7 +2490,7 @@ public abstract class Server {
      * reading and authorizing the connection header
      * @param header - RPC header
      * @param buffer - stream to request payload
-     * @throws WrappedRpcServerException - setup failed due to SASL
+     * @throws RpcServerException - setup failed due to SASL
      *         negotiation failure, premature or invalid connection context,
      *         or other state errors. This exception needs to be sent to the 
      *         client.
@@ -2495,13 +2498,13 @@ public abstract class Server {
      * @throws InterruptedException
      */
     private void processRpcOutOfBandRequest(RpcRequestHeaderProto header,
-        RpcWritable.Buffer buffer) throws WrappedRpcServerException,
+        RpcWritable.Buffer buffer) throws RpcServerException,
             IOException, InterruptedException {
       final int callId = header.getCallId();
       if (callId == CONNECTION_CONTEXT_CALL_ID) {
         // SASL must be established prior to connection context
         if (authProtocol == AuthProtocol.SASL && !saslContextEstablished) {
-          throw new WrappedRpcServerException(
+          throw new FatalRpcServerException(
               RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
               "Connection header sent during SASL negotiation");
         }
@@ -2510,7 +2513,7 @@ public abstract class Server {
       } else if (callId == AuthProtocol.SASL.callId) {
         // if client was switched to simple, ignore first SASL message
         if (authProtocol != AuthProtocol.SASL) {
-          throw new WrappedRpcServerException(
+          throw new FatalRpcServerException(
               RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
               "SASL protocol not requested by client");
         }
@@ -2518,7 +2521,7 @@ public abstract class Server {
       } else if (callId == PING_CALL_ID) {
         LOG.debug("Received ping message");
       } else {
-        throw new WrappedRpcServerException(
+        throw new FatalRpcServerException(
             RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
             "Unknown out of band call #" + callId);
       }
@@ -2526,9 +2529,9 @@ public abstract class Server {
 
     /**
      * Authorize proxy users to access this server
-     * @throws WrappedRpcServerException - user is not allowed to proxy
+     * @throws RpcServerException - user is not allowed to proxy
      */
-    private void authorizeConnection() throws WrappedRpcServerException {
+    private void authorizeConnection() throws RpcServerException {
       try {
         // If auth method is TOKEN, the token was obtained by the
         // real user for the effective user, therefore not required to
@@ -2548,7 +2551,7 @@ public abstract class Server {
             + " for protocol " + connectionContext.getProtocol()
             + " is unauthorized for user " + user);
         rpcMetrics.incrAuthorizationFailures();
-        throw new WrappedRpcServerException(
+        throw new FatalRpcServerException(
             RpcErrorCodeProto.FATAL_UNAUTHORIZED, ae);
       }
     }
@@ -2556,21 +2559,24 @@ public abstract class Server {
     /**
      * Decode the a protobuf from the given input stream 
      * @return Message - decoded protobuf
-     * @throws WrappedRpcServerException - deserialization failed
+     * @throws RpcServerException - deserialization failed
      */
     @SuppressWarnings("unchecked")
     <T extends Message> T getMessage(Message message,
-        RpcWritable.Buffer buffer) throws WrappedRpcServerException {
+        RpcWritable.Buffer buffer) throws RpcServerException {
       try {
         return (T)buffer.getValue(message);
       } catch (Exception ioe) {
         Class<?> protoClass = message.getClass();
-        throw new WrappedRpcServerException(
+        throw new FatalRpcServerException(
             RpcErrorCodeProto.FATAL_DESERIALIZING_REQUEST,
             "Error decoding " + protoClass.getSimpleName() + ": "+ ioe);
       }
     }
 
+    // ipc reader threads should invoke this directly, whereas handlers
+    // must invoke call.sendResponse to allow lifecycle management of
+    // external, postponed, deferred calls, etc.
     private void sendResponse(RpcCall call) throws IOException {
       responder.doRespond(call);
     }
@@ -2873,6 +2879,10 @@ public abstract class Server {
       RpcCall call, RpcStatusProto status, RpcErrorCodeProto erCode,
       Writable rv, String errorClass, String error)
           throws IOException {
+    // fatal responses will cause the reader to close the connection.
+    if (status == RpcStatusProto.FATAL) {
+      call.connection.setShouldClose();
+    }
     RpcResponseHeaderProto.Builder headerBuilder =
         RpcResponseHeaderProto.newBuilder();
     headerBuilder.setClientId(ByteString.copyFrom(call.clientId));

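The refactor collapses WrappedRpcServerException and its suppressed variant into FatalRpcServerException, whose getRpcStatusProto() reports FATAL, and setupResponse() now marks the connection for closing whenever it sends a FATAL status, so the reader loops stop on shouldClose() instead of relying on rethrown exceptions. A simplified, hypothetical illustration of that status-driven close (stand-in types, not the Hadoop Server internals):

// Illustrative only: an RPC-layer exception carries a status, and only a
// FATAL status flags the connection for closing after the error response.
public class FatalStatusDemo {
  enum RpcStatus { SUCCESS, ERROR, FATAL }

  static class RpcException extends Exception {
    RpcException(String msg) { super(msg); }
    RpcStatus status() { return RpcStatus.ERROR; }      // non-fatal by default
  }

  static class FatalRpcException extends RpcException {
    FatalRpcException(String msg) { super(msg); }
    @Override RpcStatus status() { return RpcStatus.FATAL; }
  }

  static class Connection {
    private boolean shouldClose;
    boolean shouldClose() { return shouldClose; }

    void processOneRpc(boolean invalidHeader) {
      try {
        if (invalidHeader) {
          throw new FatalRpcException("invalid rpc header");
        }
        throw new RpcException("retriable overflow");    // e.g. call queue full
      } catch (RpcException e) {
        // inform the client, but do not rethrow; only a FATAL status
        // marks the connection for closing.
        System.out.println("sending error response: " + e.getMessage());
        if (e.status() == RpcStatus.FATAL) {
          shouldClose = true;
        }
      }
    }
  }

  public static void main(String[] args) {
    Connection c = new Connection();
    c.processOneRpc(false);
    System.out.println("close after non-fatal? " + c.shouldClose()); // false
    c.processOneRpc(true);
    System.out.println("close after fatal? " + c.shouldClose());     // true
  }
}
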
+ 67 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNode.java

@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.util.List;
+
+
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public interface InnerNode extends Node {
+  interface Factory<N extends InnerNode> {
+    /** Construct an InnerNode from a path-like string */
+    N newInnerNode(String path);
+  }
+
+  /** Add node <i>n</i> to the subtree of this node
+   * @param n node to be added
+   * @return true if the node is added; false otherwise
+   */
+  boolean add(Node n);
+
+  /** Given a node's string representation, return a reference to the node.
+   * @param loc string location of the form /rack/node
+   * @return the named node, or null if the node is not found or the child
+   * node found along the path is not an instance of {@link InnerNodeImpl}
+   */
+  Node getLoc(String loc);
+
+  /** @return its children */
+  List<Node> getChildren();
+
+  /** @return the number of leave nodes. */
+  int getNumOfLeaves();
+
+  /** Remove node <i>n</i> from the subtree of this node
+   * @param n node to be deleted
+   * @return true if the node is deleted; false otherwise
+   */
+  boolean remove(Node n);
+
+  /** Get the <i>leafIndex</i>-th leaf of this subtree, skipping any leaves
+   * that fall under <i>excludedNode</i>.
+   *
+   * @param leafIndex an index into the leaves of this subtree
+   * @param excludedNode an excluded node (can be null)
+   * @return the selected leaf node, or null if the index is out of range
+   */
+  Node getLeaf(int leafIndex, Node excludedNode);
+}

+ 304 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java

@@ -0,0 +1,304 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/** InnerNode represents a switch/router of a data center or rack.
+ * Different from a leaf node, it has non-null children.
+ */
+class InnerNodeImpl extends NodeBase implements InnerNode {
+  static class Factory implements InnerNode.Factory<InnerNodeImpl> {
+    private Factory() {}
+
+    @Override
+    public InnerNodeImpl newInnerNode(String path) {
+      return new InnerNodeImpl(path);
+    }
+  }
+
+  static final Factory FACTORY = new Factory();
+
+  private final List<Node> children = new ArrayList<>();
+  private final Map<String, Node> childrenMap = new HashMap<>();
+  private int numOfLeaves;
+
+  /** Construct an InnerNode from a path-like string */
+  InnerNodeImpl(String path) {
+    super(path);
+  }
+
+  /** Construct an InnerNode
+   * from its name, its network location, its parent, and its level */
+  InnerNodeImpl(String name, String location, InnerNode parent, int level) {
+    super(name, location, parent, level);
+  }
+
+  @Override
+  public List<Node> getChildren() {return children;}
+
+  /** @return the number of children this node has */
+  int getNumOfChildren() {
+    return children.size();
+  }
+
+  /** Judge if this node represents a rack
+   * @return true if it has no child or its children are not InnerNodes
+   */
+  boolean isRack() {
+    if (children.isEmpty()) {
+      return true;
+    }
+
+    Node firstChild = children.get(0);
+    if (firstChild instanceof InnerNode) {
+      return false;
+    }
+
+    return true;
+  }
+
+  /** Judge if this node is an ancestor of node <i>n</i>
+   *
+   * @param n a node
+   * @return true if this node is an ancestor of <i>n</i>
+   */
+  boolean isAncestor(Node n) {
+    return getPath(this).equals(NodeBase.PATH_SEPARATOR_STR) ||
+      (n.getNetworkLocation()+NodeBase.PATH_SEPARATOR_STR).
+      startsWith(getPath(this)+NodeBase.PATH_SEPARATOR_STR);
+  }
+
+  /** Judge if this node is the parent of node <i>n</i>
+   *
+   * @param n a node
+   * @return true if this node is the parent of <i>n</i>
+   */
+  boolean isParent(Node n) {
+    return n.getNetworkLocation().equals(getPath(this));
+  }
+
+  /* Return a child name of this node who is an ancestor of node <i>n</i> */
+  private String getNextAncestorName(Node n) {
+    if (!isAncestor(n)) {
+      throw new IllegalArgumentException(
+                                         this + " is not an ancestor of " + n);
+    }
+    String name = n.getNetworkLocation().substring(getPath(this).length());
+    if (name.charAt(0) == PATH_SEPARATOR) {
+      name = name.substring(1);
+    }
+    int index=name.indexOf(PATH_SEPARATOR);
+    if (index !=-1)
+      name = name.substring(0, index);
+    return name;
+  }
+
+  @Override
+  public boolean add(Node n) {
+    if (!isAncestor(n)) {
+      throw new IllegalArgumentException(n.getName()
+          + ", which is located at " + n.getNetworkLocation()
+          + ", is not a descendant of " + getPath(this));
+    }
+    if (isParent(n)) {
+      // this node is the parent of n; add n directly
+      n.setParent(this);
+      n.setLevel(this.level+1);
+      Node prev = childrenMap.put(n.getName(), n);
+      if (prev != null) {
+        for(int i=0; i<children.size(); i++) {
+          if (children.get(i).getName().equals(n.getName())) {
+            children.set(i, n);
+            return false;
+          }
+        }
+      }
+      children.add(n);
+      numOfLeaves++;
+      return true;
+    } else {
+      // find the next ancestor node
+      String parentName = getNextAncestorName(n);
+      InnerNode parentNode = (InnerNode)childrenMap.get(parentName);
+      if (parentNode == null) {
+        // create a new InnerNode
+        parentNode = createParentNode(parentName);
+        children.add(parentNode);
+        childrenMap.put(parentNode.getName(), parentNode);
+      }
+      // add n to the subtree of the next ancestor node
+      if (parentNode.add(n)) {
+        numOfLeaves++;
+        return true;
+      } else {
+        return false;
+      }
+    }
+  }
+
+  /**
+   * Creates a parent node to be added to the list of children.
+   * Creates a node using the InnerNode four argument constructor specifying
+   * the name, location, parent, and level of this node.
+   *
+   * <p>To be overridden in subclasses for specific InnerNode implementations,
+   * as alternative to overriding the full {@link #add(Node)} method.
+   *
+   * @param parentName The name of the parent node
+   * @return A new inner node
+   * @see #InnerNodeImpl(String, String, InnerNode, int)
+   */
+  private InnerNodeImpl createParentNode(String parentName) {
+    return new InnerNodeImpl(parentName, getPath(this), this, this.getLevel()+1);
+  }
+
+  @Override
+  public boolean remove(Node n) {
+    if (!isAncestor(n)) {
+      throw new IllegalArgumentException(n.getName()
+          + ", which is located at " + n.getNetworkLocation()
+          + ", is not a descendant of " + getPath(this));
+    }
+    if (isParent(n)) {
+      // this node is the parent of n; remove n directly
+      if (childrenMap.containsKey(n.getName())) {
+        for (int i=0; i<children.size(); i++) {
+          if (children.get(i).getName().equals(n.getName())) {
+            children.remove(i);
+            childrenMap.remove(n.getName());
+            numOfLeaves--;
+            n.setParent(null);
+            return true;
+          }
+        }
+      }
+      return false;
+    } else {
+      // find the next ancestor node: the parent node
+      String parentName = getNextAncestorName(n);
+      InnerNodeImpl parentNode = (InnerNodeImpl)childrenMap.get(parentName);
+      if (parentNode == null) {
+        return false;
+      }
+      // remove n from the parent node
+      boolean isRemoved = parentNode.remove(n);
+      // if the parent node has no children, remove the parent node too
+      if (isRemoved) {
+        if (parentNode.getNumOfChildren() == 0) {
+          for(int i=0; i < children.size(); i++) {
+            if (children.get(i).getName().equals(parentName)) {
+              children.remove(i);
+              childrenMap.remove(parentName);
+              break;
+            }
+          }
+        }
+        numOfLeaves--;
+      }
+      return isRemoved;
+    }
+  } // end of remove
+
+  @Override
+  public Node getLoc(String loc) {
+    if (loc == null || loc.length() == 0) return this;
+
+    String[] path = loc.split(PATH_SEPARATOR_STR, 2);
+    Node childnode = childrenMap.get(path[0]);
+    if (childnode == null) return null; // non-existing node
+    if (path.length == 1) return childnode;
+    if (childnode instanceof InnerNode) {
+      return ((InnerNode)childnode).getLoc(path[1]);
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public Node getLeaf(int leafIndex, Node excludedNode) {
+    int count=0;
+    // check if the excluded node is a leaf
+    boolean isLeaf =
+      excludedNode == null || !(excludedNode instanceof InnerNode);
+    // calculate the total number of excluded leaf nodes
+    int numOfExcludedLeaves =
+      isLeaf ? 1 : ((InnerNode)excludedNode).getNumOfLeaves();
+    if (isLeafParent()) { // children are leaves
+      if (isLeaf) { // excluded node is a leaf node
+        if (excludedNode != null &&
+            childrenMap.containsKey(excludedNode.getName())) {
+          int excludedIndex = children.indexOf(excludedNode);
+          if (excludedIndex != -1 && leafIndex >= 0) {
+            // excluded node is one of the children so adjust the leaf index
+            leafIndex = leafIndex>=excludedIndex ? leafIndex+1 : leafIndex;
+          }
+        }
+      }
+      // range check
+      if (leafIndex<0 || leafIndex>=this.getNumOfChildren()) {
+        return null;
+      }
+      return children.get(leafIndex);
+    } else {
+      for(int i=0; i<children.size(); i++) {
+        InnerNodeImpl child = (InnerNodeImpl)children.get(i);
+        if (excludedNode == null || excludedNode != child) {
+          // not the excludedNode
+          int numOfLeaves = child.getNumOfLeaves();
+          if (excludedNode != null && child.isAncestor(excludedNode)) {
+            numOfLeaves -= numOfExcludedLeaves;
+          }
+          if (count+numOfLeaves > leafIndex) {
+            // the leaf is in the child subtree
+            return child.getLeaf(leafIndex-count, excludedNode);
+          } else {
+            // go to the next child
+            count = count+numOfLeaves;
+          }
+        } else { // it is the excludedNode
+          // skip it and set the excludedNode to be null
+          excludedNode = null;
+        }
+      }
+      return null;
+    }
+  }
+
+  private boolean isLeafParent() {
+    return isRack();
+  }
+
+  @Override
+  public int getNumOfLeaves() {
+    return numOfLeaves;
+  }
+
+  @Override
+  public int hashCode() {
+    return super.hashCode();
+  }
+
+  @Override
+  public boolean equals(Object to) {
+    return super.equals(to);
+  }
+}
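
InnerNodeImpl carries over the tree logic previously nested in NetworkTopology. A hedged usage sketch follows; the class and its factory are package-private, so the snippet would have to live in org.apache.hadoop.net, real callers go through NetworkTopology, and the data-center/rack paths are made up for illustration:

package org.apache.hadoop.net;

// Hypothetical example of building a small topology subtree directly.
public class TopologyExample {
  public static void main(String[] args) {
    InnerNode root = InnerNodeImpl.FACTORY.newInnerNode(NodeBase.ROOT);

    // Leaves are ordinary NodeBase instances: a name plus a network location.
    Node host1 = new NodeBase("host1", "/dc1/rack1");
    Node host2 = new NodeBase("host2", "/dc1/rack2");
    root.add(host1);   // creates the intermediate /dc1 and /dc1/rack1 nodes
    root.add(host2);

    System.out.println(root.getNumOfLeaves());           // 2
    System.out.println(root.getLoc("dc1/rack1/host1"));  // host1 (no leading '/')
    System.out.println(root.getLeaf(0, null));           // first leaf of the subtree
  }
}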

+ 7 - 319
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java

@@ -17,18 +17,9 @@
  */
 package org.apache.hadoop.net;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.TreeMap;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -37,8 +28,9 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
+import java.util.*;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /** The class represents a cluster of computer with a tree hierarchical
  * network topology.
@@ -81,314 +73,11 @@ public class NetworkTopology {
         NetworkTopology.class, NetworkTopology.class), conf);
   }
 
-  /** InnerNode represents a switch/router of a data center or rack.
-   * Different from a leaf node, it has non-null children.
-   */
-  static class InnerNode extends NodeBase {
-    protected List<Node> children=new ArrayList<Node>();
-    private Map<String, Node> childrenMap = new HashMap<String, Node>();
-    private int numOfLeaves;
-        
-    /** Construct an InnerNode from a path-like string */
-    InnerNode(String path) {
-      super(path);
-    }
-        
-    /** Construct an InnerNode from its name and its network location */
-    InnerNode(String name, String location) {
-      super(name, location);
-    }
-        
-    /** Construct an InnerNode
-     * from its name, its network location, its parent, and its level */
-    InnerNode(String name, String location, InnerNode parent, int level) {
-      super(name, location, parent, level);
-    }
-        
-    /** @return its children */
-    List<Node> getChildren() {return children;}
-        
-    /** @return the number of children this node has */
-    int getNumOfChildren() {
-      return children.size();
-    }
-        
-    /** Judge if this node represents a rack 
-     * @return true if it has no child or its children are not InnerNodes
-     */ 
-    boolean isRack() {
-      if (children.isEmpty()) {
-        return true;
-      }
-            
-      Node firstChild = children.get(0);
-      if (firstChild instanceof InnerNode) {
-        return false;
-      }
-            
-      return true;
-    }
-        
-    /** Judge if this node is an ancestor of node <i>n</i>
-     * 
-     * @param n a node
-     * @return true if this node is an ancestor of <i>n</i>
-     */
-    boolean isAncestor(Node n) {
-      return getPath(this).equals(NodeBase.PATH_SEPARATOR_STR) ||
-        (n.getNetworkLocation()+NodeBase.PATH_SEPARATOR_STR).
-        startsWith(getPath(this)+NodeBase.PATH_SEPARATOR_STR);
-    }
-        
-    /** Judge if this node is the parent of node <i>n</i>
-     * 
-     * @param n a node
-     * @return true if this node is the parent of <i>n</i>
-     */
-    boolean isParent(Node n) {
-      return n.getNetworkLocation().equals(getPath(this));
-    }
-        
-    /* Return a child name of this node who is an ancestor of node <i>n</i> */
-    private String getNextAncestorName(Node n) {
-      if (!isAncestor(n)) {
-        throw new IllegalArgumentException(
-                                           this + "is not an ancestor of " + n);
-      }
-      String name = n.getNetworkLocation().substring(getPath(this).length());
-      if (name.charAt(0) == PATH_SEPARATOR) {
-        name = name.substring(1);
-      }
-      int index=name.indexOf(PATH_SEPARATOR);
-      if (index !=-1)
-        name = name.substring(0, index);
-      return name;
-    }
-        
-    /** Add node <i>n</i> to the subtree of this node 
-     * @param n node to be added
-     * @return true if the node is added; false otherwise
-     */
-    boolean add(Node n) {
-      if (!isAncestor(n)) {
-        throw new IllegalArgumentException(n.getName()
-            + ", which is located at " + n.getNetworkLocation()
-            + ", is not a descendant of " + getPath(this));
-      }
-      if (isParent(n)) {
-        // this node is the parent of n; add n directly
-        n.setParent(this);
-        n.setLevel(this.level+1);
-        Node prev = childrenMap.put(n.getName(), n);
-        if (prev != null) {
-          for(int i=0; i<children.size(); i++) {
-            if (children.get(i).getName().equals(n.getName())) {
-              children.set(i, n);
-              return false;
-            }
-          }
-        }
-        children.add(n);
-        numOfLeaves++;
-        return true;
-      } else {
-        // find the next ancestor node
-        String parentName = getNextAncestorName(n);
-        InnerNode parentNode = (InnerNode)childrenMap.get(parentName);
-        if (parentNode == null) {
-          // create a new InnerNode
-          parentNode = createParentNode(parentName);
-          children.add(parentNode);
-          childrenMap.put(parentNode.getName(), parentNode);
-        }
-        // add n to the subtree of the next ancestor node
-        if (parentNode.add(n)) {
-          numOfLeaves++;
-          return true;
-        } else {
-          return false;
-        }
-      }
-    }
-
-    /**
-     * Creates a parent node to be added to the list of children.  
-     * Creates a node using the InnerNode four argument constructor specifying 
-     * the name, location, parent, and level of this node.
-     * 
-     * <p>To be overridden in subclasses for specific InnerNode implementations,
-     * as alternative to overriding the full {@link #add(Node)} method.
-     * 
-     * @param parentName The name of the parent node
-     * @return A new inner node
-     * @see InnerNode#InnerNode(String, String, InnerNode, int)
-     */
-    protected InnerNode createParentNode(String parentName) {
-      return new InnerNode(parentName, getPath(this), this, this.getLevel()+1);
-    }
-
-    /** Remove node <i>n</i> from the subtree of this node
-     * @param n node to be deleted 
-     * @return true if the node is deleted; false otherwise
-     */
-    boolean remove(Node n) {
-      if (!isAncestor(n)) {
-        throw new IllegalArgumentException(n.getName()
-            + ", which is located at " + n.getNetworkLocation()
-            + ", is not a descendant of " + getPath(this));
-      }
-      if (isParent(n)) {
-        // this node is the parent of n; remove n directly
-        if (childrenMap.containsKey(n.getName())) {
-          for (int i=0; i<children.size(); i++) {
-            if (children.get(i).getName().equals(n.getName())) {
-              children.remove(i);
-              childrenMap.remove(n.getName());
-              numOfLeaves--;
-              n.setParent(null);
-              return true;
-            }
-          }
-        }
-        return false;
-      } else {
-        // find the next ancestor node: the parent node
-        String parentName = getNextAncestorName(n);
-        InnerNode parentNode = (InnerNode)childrenMap.get(parentName);
-        if (parentNode == null) {
-          return false;
-        }
-        // remove n from the parent node
-        boolean isRemoved = parentNode.remove(n);
-        // if the parent node has no children, remove the parent node too
-        if (isRemoved) {
-          if (parentNode.getNumOfChildren() == 0) {
-            for(int i=0; i < children.size(); i++) {
-              if (children.get(i).getName().equals(parentName)) {
-                children.remove(i);
-                childrenMap.remove(parentName);
-                break;
-              }
-            }
-          }
-          numOfLeaves--;
-        }
-        return isRemoved;
-      }
-    } // end of remove
-        
-    /** Given a node's string representation, return a reference to the node
-     * @param loc string location of the form /rack/node
-     * @return null if the node is not found or the childnode is there but
-     * not an instance of {@link InnerNode}
-     */
-    private Node getLoc(String loc) {
-      if (loc == null || loc.length() == 0) return this;
-            
-      String[] path = loc.split(PATH_SEPARATOR_STR, 2);
-      Node childnode = childrenMap.get(path[0]);
-      if (childnode == null) return null; // non-existing node
-      if (path.length == 1) return childnode;
-      if (childnode instanceof InnerNode) {
-        return ((InnerNode)childnode).getLoc(path[1]);
-      } else {
-        return null;
-      }
-    }
-        
-    /** get <i>leafIndex</i> leaf of this subtree 
-     * if it is not in the <i>excludedNode</i>
-     *
-     * @param leafIndex an indexed leaf of the node
-     * @param excludedNode an excluded node (can be null)
-     * @return
-     */
-    Node getLeaf(int leafIndex, Node excludedNode) {
-      int count=0;
-      // check if the excluded node a leaf
-      boolean isLeaf =
-        excludedNode == null || !(excludedNode instanceof InnerNode);
-      // calculate the total number of excluded leaf nodes
-      int numOfExcludedLeaves =
-        isLeaf ? 1 : ((InnerNode)excludedNode).getNumOfLeaves();
-      if (isLeafParent()) { // children are leaves
-        if (isLeaf) { // excluded node is a leaf node
-          if (excludedNode != null &&
-              childrenMap.containsKey(excludedNode.getName())) {
-            int excludedIndex = children.indexOf(excludedNode);
-            if (excludedIndex != -1 && leafIndex >= 0) {
-              // excluded node is one of the children so adjust the leaf index
-              leafIndex = leafIndex>=excludedIndex ? leafIndex+1 : leafIndex;
-            }
-          }
-        }
-        // range check
-        if (leafIndex<0 || leafIndex>=this.getNumOfChildren()) {
-          return null;
-        }
-        return children.get(leafIndex);
-      } else {
-        for(int i=0; i<children.size(); i++) {
-          InnerNode child = (InnerNode)children.get(i);
-          if (excludedNode == null || excludedNode != child) {
-            // not the excludedNode
-            int numOfLeaves = child.getNumOfLeaves();
-            if (excludedNode != null && child.isAncestor(excludedNode)) {
-              numOfLeaves -= numOfExcludedLeaves;
-            }
-            if (count+numOfLeaves > leafIndex) {
-              // the leaf is in the child subtree
-              return child.getLeaf(leafIndex-count, excludedNode);
-            } else {
-              // go to the next child
-              count = count+numOfLeaves;
-            }
-          } else { // it is the excluededNode
-            // skip it and set the excludedNode to be null
-            excludedNode = null;
-          }
-        }
-        return null;
-      }
-    }
-    
-    protected boolean isLeafParent() {
-      return isRack();
-    }
-
-    /**
-      * Determine if children a leaves, default implementation calls {@link #isRack()}
-      * <p>To be overridden in subclasses for specific InnerNode implementations,
-      * as alternative to overriding the full {@link #getLeaf(int, Node)} method.
-      * 
-      * @return true if children are leaves, false otherwise
-      */
-    protected boolean areChildrenLeaves() {
-      return isRack();
-    }
-
-    /**
-     * Get number of leaves.
-     */
-    int getNumOfLeaves() {
-      return numOfLeaves;
-    }
-
-    @Override
-    public int hashCode() {
-      return super.hashCode();
-    }
-
-    @Override
-    public boolean equals(Object to) {
-      return super.equals(to);
-    }
-  } // end of InnerNode
-
+  InnerNode.Factory factory = InnerNodeImpl.FACTORY;
   /**
    * the root cluster map
    */
-  InnerNode clusterMap;
+  InnerNode clusterMap = factory.newInnerNode(NodeBase.ROOT);
   /** Depth of all leaf nodes */
   private int depthOfAllLeaves = -1;
   /** rack counter */
@@ -404,7 +93,6 @@ public class NetworkTopology {
   protected ReadWriteLock netlock = new ReentrantReadWriteLock();
 
   public NetworkTopology() {
-    clusterMap = new InnerNode(InnerNode.ROOT);
   }
 
   /** Add a leaf node

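With this refactoring, `InnerNode` becomes an interface backed by the new `InnerNodeImpl`, and `NetworkTopology` only reaches it through the `factory` field, so a topology variant can substitute its own inner-node type. A minimal sketch of such a variant, assuming it lives in `org.apache.hadoop.net` where `clusterMap`, `factory` and `InnerNodeImpl(String)` are visible; `CustomTopology` and `CustomInnerNode` are hypothetical names, not part of this patch:

```java
// Hypothetical sketch, mirroring what NetworkTopologyWithNodeGroup does below:
// extend InnerNodeImpl and swap the root of the cluster map in the constructor.
package org.apache.hadoop.net;

public class CustomTopology extends NetworkTopology {

  static class CustomInnerNode extends InnerNodeImpl {
    CustomInnerNode(String path) {
      super(path);
    }
    // override isRack()/getLeaf() here for topology-specific placement rules
  }

  public CustomTopology() {
    // Replace the default root created by NetworkTopology's field initializer.
    // The non-final `factory` field could be reassigned too if intermediate
    // nodes created during add()/remove() should also use the custom type.
    clusterMap = new CustomInnerNode(NodeBase.ROOT);
  }
}
```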
+ 9 - 34
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java

@@ -36,7 +36,7 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
   public final static String DEFAULT_NODEGROUP = "/default-nodegroup";
 
   public NetworkTopologyWithNodeGroup() {
-    clusterMap = new InnerNodeWithNodeGroup(InnerNode.ROOT);
+    clusterMap = new InnerNodeWithNodeGroup(NodeBase.ROOT);
   }
 
   @Override
@@ -58,7 +58,7 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
   public String getRack(String loc) {
     netlock.readLock().lock();
     try {
-      loc = InnerNode.normalize(loc);
+      loc = NodeBase.normalize(loc);
       Node locNode = getNode(loc);
       if (locNode instanceof InnerNodeWithNodeGroup) {
         InnerNodeWithNodeGroup node = (InnerNodeWithNodeGroup) locNode;
@@ -90,7 +90,7 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
   public String getNodeGroup(String loc) {
     netlock.readLock().lock();
     try {
-      loc = InnerNode.normalize(loc);
+      loc = NodeBase.normalize(loc);
       Node locNode = getNode(loc);
       if (locNode instanceof InnerNodeWithNodeGroup) {
         InnerNodeWithNodeGroup node = (InnerNodeWithNodeGroup) locNode;
@@ -238,7 +238,7 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
       if (clusterMap.remove(node)) {
         Node nodeGroup = getNode(node.getNetworkLocation());
         if (nodeGroup == null) {
-          nodeGroup = new InnerNode(node.getNetworkLocation());
+          nodeGroup = factory.newInnerNode(node.getNetworkLocation());
         }
         InnerNode rack = (InnerNode)getNode(nodeGroup.getNetworkLocation());
         if (rack == null) {
@@ -302,16 +302,7 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
   /** InnerNodeWithNodeGroup represents a switch/router of a data center, rack
    * or physical host. Different from a leaf node, it has non-null children.
    */
-  static class InnerNodeWithNodeGroup extends InnerNode {
-    public InnerNodeWithNodeGroup(String name, String location, 
-        InnerNode parent, int level) {
-      super(name, location, parent, level);
-    }
-
-    public InnerNodeWithNodeGroup(String name, String location) {
-      super(name, location);
-    }
-
+  static class InnerNodeWithNodeGroup extends InnerNodeImpl {
     public InnerNodeWithNodeGroup(String path) {
       super(path);
     }
@@ -323,10 +314,10 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
         return false;
       }
 
-      Node firstChild = children.get(0);
+      Node firstChild = getChildren().get(0);
 
       if (firstChild instanceof InnerNode) {
-        Node firstGrandChild = (((InnerNode) firstChild).children).get(0);
+        Node firstGrandChild = (((InnerNode) firstChild).getChildren()).get(0);
         if (firstGrandChild instanceof InnerNode) {
           // it is datacenter
           return false;
@@ -343,31 +334,15 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
      * @return true if it has no child or its children are not InnerNodes
      */
     boolean isNodeGroup() {
-      if (children.isEmpty()) {
+      if (getChildren().isEmpty()) {
         return true;
       }
-      Node firstChild = children.get(0);
+      Node firstChild = getChildren().get(0);
       if (firstChild instanceof InnerNode) {
         // it is rack or datacenter
         return false;
       }
       return true;
     }
-    
-    @Override
-    protected boolean isLeafParent() {
-      return isNodeGroup();
-    }
-
-    @Override
-    protected InnerNode createParentNode(String parentName) {
-      return new InnerNodeWithNodeGroup(parentName, getPath(this), this,
-          this.getLevel() + 1);
-    }
-
-    @Override
-    protected boolean areChildrenLeaves() {
-      return isNodeGroup();
-    }
   }
 }

+ 24 - 19
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java

@@ -17,10 +17,11 @@
  */
 package org.apache.hadoop.security;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.http.NameValuePair;
 import org.apache.http.client.utils.URLEncodedUtils;
 
@@ -41,6 +42,9 @@ import java.util.List;
  */
 public class AuthenticationWithProxyUserFilter extends AuthenticationFilter {
 
+  public static final Log LOG =
+      LogFactory.getLog(AuthenticationWithProxyUserFilter.class);
+
   /**
    * Constant used in URL's query string to perform a proxy user request, the
    * value of the <code>DO_AS</code> parameter is the user the request will be
@@ -66,29 +70,30 @@ public class AuthenticationWithProxyUserFilter extends AuthenticationFilter {
   protected void doFilter(FilterChain filterChain, HttpServletRequest request,
       HttpServletResponse response) throws IOException, ServletException {
 
-    // authorize proxy user before calling next filter.
-    String proxyUser = getDoAs(request);
+    final String proxyUser = getDoAs(request);
     if (proxyUser != null) {
-      UserGroupInformation realUser =
-          UserGroupInformation.createRemoteUser(request.getRemoteUser());
-      UserGroupInformation proxyUserInfo =
-          UserGroupInformation.createProxyUser(proxyUser, realUser);
-
-      try {
-        ProxyUsers.authorize(proxyUserInfo, request.getRemoteAddr());
-      } catch (AuthorizationException ex) {
-        HttpExceptionUtils.createServletExceptionResponse(response,
-            HttpServletResponse.SC_FORBIDDEN, ex);
-        // stop filter chain if there is an Authorization Exception.
-        return;
-      }
 
-      final UserGroupInformation finalProxyUser = proxyUserInfo;
       // Change the remote user after proxy user is authorized.
-      request = new HttpServletRequestWrapper(request) {
+      final HttpServletRequest finalReq = request;
+      request = new HttpServletRequestWrapper(finalReq) {
+
+        private String getRemoteOrProxyUser() throws AuthorizationException {
+          UserGroupInformation realUser =
+              UserGroupInformation.createRemoteUser(finalReq.getRemoteUser());
+          UserGroupInformation proxyUserInfo =
+              UserGroupInformation.createProxyUser(proxyUser, realUser);
+          ProxyUsers.authorize(proxyUserInfo, finalReq.getRemoteAddr());
+          return proxyUserInfo.getUserName();
+        }
+
         @Override
         public String getRemoteUser() {
-          return finalProxyUser.getUserName();
+          try {
+            return getRemoteOrProxyUser();
+          } catch (AuthorizationException ex) {
+            LOG.error("Unable to verify proxy user: " + ex.getMessage(), ex);
+          }
+          return null;
         }
       };
 

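The rewritten filter no longer rejects an unauthorized `doAs` request with a 403 up front; `ProxyUsers.authorize()` now runs lazily inside the wrapper's `getRemoteUser()`, which logs the failure and returns null. A minimal sketch of what a downstream consumer sees under that contract; `WhoAmIServlet` is hypothetical and not part of the patch:

```java
import java.io.IOException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

public class WhoAmIServlet extends HttpServlet {
  @Override
  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws IOException {
    // May trigger the lazy proxy-user authorization in the wrapper above.
    String user = req.getRemoteUser();
    if (user == null) {
      // Authorization failed (or no authenticated user); the consumer must
      // reject, since the filter no longer sends SC_FORBIDDEN itself.
      resp.sendError(HttpServletResponse.SC_FORBIDDEN,
          "proxy user not authorized");
      return;
    }
    resp.getWriter().println("remote user: " + user);
  }
}
```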
+ 101 - 28
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -18,6 +18,8 @@
 package org.apache.hadoop.security;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_TREAT_SUBJECT_EXTERNAL_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_TREAT_SUBJECT_EXTERNAL_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_TOKEN_FILES;
@@ -79,6 +81,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -273,6 +276,29 @@ public class UserGroupInformation {
   /** Min time (in seconds) before relogin for Kerberos */
   private static long kerberosMinSecondsBeforeRelogin;
   /** The configuration to use */
+
+  /*
+   * This config is a temporary one for backward compatibility.
+   * It controls whether to treat the subject passed to
+   * UserGroupInformation(Subject) as external. If true,
+   * -  no renewal thread will be created to renew the credential, and
+   * -  reloginFromKeytab() and reloginFromTicketCache() will not renew
+   *    the credential;
+   * the owner of the subject is assumed to renew it. If false, the old
+   * behavior prior to fixing HADOOP-13558 and HADOOP-13805 is retained.
+   * The default is false.
+   */
+  private static boolean treatSubjectExternal = false;
+
+  /*
+   * Some tests need the renewal thread to be created even when they call
+   *   UserGroupInformation.loginUserFromSubject(subject);
+   * Test code may set this variable to true via the
+   *   setEnableRenewThreadCreationForTest(boolean)
+   * method.
+   */
+  private static boolean enableRenewThreadCreationForTest = false;
+
   private static Configuration conf;
 
   
@@ -338,6 +364,15 @@ public class UserGroupInformation {
         metrics.getGroupsQuantiles = getGroupsQuantiles;
       }
     }
+
+    treatSubjectExternal = conf.getBoolean(HADOOP_TREAT_SUBJECT_EXTERNAL_KEY,
+        HADOOP_TREAT_SUBJECT_EXTERNAL_DEFAULT);
+    if (treatSubjectExternal) {
+      LOG.info("Config " + HADOOP_TREAT_SUBJECT_EXTERNAL_KEY + " is set to "
+          + "true; the owner of the subject passed to "
+          + "UserGroupInformation(Subject) is expected to renew the "
+          + "credential.");
+    }
   }
 
   /**
@@ -351,7 +386,19 @@ public class UserGroupInformation {
   public static void setConfiguration(Configuration conf) {
     initialize(conf, true);
   }
-  
+
+  @InterfaceAudience.Private
+  @VisibleForTesting
+  static void setEnableRenewThreadCreationForTest(boolean b) {
+    enableRenewThreadCreationForTest = b;
+  }
+
+  @InterfaceAudience.Private
+  @VisibleForTesting
+  static boolean getEnableRenewThreadCreationForTest() {
+    return enableRenewThreadCreationForTest;
+  }
+
   @InterfaceAudience.Private
   @VisibleForTesting
   public static void reset() {
@@ -361,6 +408,7 @@ public class UserGroupInformation {
     kerberosMinSecondsBeforeRelogin = 0;
     setLoginUser(null);
     HadoopKerberosName.setRules(null);
+    setEnableRenewThreadCreationForTest(false);
   }
   
   /**
@@ -392,6 +440,7 @@ public class UserGroupInformation {
   private final User user;
   private final boolean isKeytab;
   private final boolean isKrbTkt;
+  private final boolean isLoginExternal;
   
   private static String OS_LOGIN_MODULE_NAME;
   private static Class<? extends Principal> OS_PRINCIPAL_CLASS;
@@ -644,28 +693,28 @@ public class UserGroupInformation {
   /**
    * Create a UserGroupInformation for the given subject.
    * This does not change the subject or acquire new credentials.
+   *
+   * The creator of the subject is responsible for renewing its credentials.
    * @param subject the user's subject
    */
   UserGroupInformation(Subject subject) {
-    this(subject, false);
+    this(subject, treatSubjectExternal);
   }
 
   /**
    * Create a UGI from the given subject.
    * @param subject the subject
-   * @param externalKeyTab if the subject's keytab is managed by the user.
+   * @param isLoginExternal if the subject's keytab is managed by another UGI.
    *                       Setting this to true will prevent UGI from attempting
    *                       to login the keytab, or to renew it.
    */
-  private UserGroupInformation(Subject subject, final boolean externalKeyTab) {
+  private UserGroupInformation(Subject subject, final boolean isLoginExternal) {
     this.subject = subject;
     this.user = subject.getPrincipals(User.class).iterator().next();
-    if (externalKeyTab) {
-      this.isKeytab = false;
-    } else {
-      this.isKeytab = KerberosUtil.hasKerberosKeyTab(subject);
-    }
+
+    this.isKeytab = KerberosUtil.hasKerberosKeyTab(subject);
     this.isKrbTkt = KerberosUtil.hasKerberosTicket(subject);
+    this.isLoginExternal = isLoginExternal;
   }
   
   /**
@@ -766,7 +815,7 @@ public class UserGroupInformation {
       User ugiUser = new User(loginPrincipals.iterator().next().getName(),
           AuthenticationMethod.KERBEROS, login);
       loginSubject.getPrincipals().add(ugiUser);
-      UserGroupInformation ugi = new UserGroupInformation(loginSubject);
+      UserGroupInformation ugi = new UserGroupInformation(loginSubject, false);
       ugi.setLogin(login);
       ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
       return ugi;
@@ -782,7 +831,9 @@ public class UserGroupInformation {
   /**
    * Create a UserGroupInformation from a Subject with Kerberos principal.
    *
-   * @param subject             The KerberosPrincipal to use in UGI
+   * @param subject             The KerberosPrincipal to use in UGI.
+   *                            The creator of the subject is responsible for
+   *                            renewing credentials.
    *
    * @throws IOException
    * @throws KerberosAuthException if the kerberos login fails
@@ -843,6 +894,10 @@ public class UserGroupInformation {
    * Log in a user using the given subject
    * @param subject the subject to use when logging in a user, or null to
    * create a new subject.
+   *
+   * If subject is not null, the creator of the subject is responsible for
+   * renewing its credentials.
+   *
    * @throws IOException if login fails
    */
   @InterfaceAudience.Public
@@ -850,17 +905,25 @@ public class UserGroupInformation {
   public synchronized 
   static void loginUserFromSubject(Subject subject) throws IOException {
     ensureInitialized();
+    boolean externalSubject = false;
     try {
       if (subject == null) {
         subject = new Subject();
+      } else {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Treat subject external: " + treatSubjectExternal
+              + ". When true, assuming the keytab is managed externally since "
+              + "login is from an externally provided subject.");
+        }
+        externalSubject = treatSubjectExternal;
       }
       LoginContext login =
           newLoginContext(authenticationMethod.getLoginAppName(), 
                           subject, new HadoopConfiguration());
       login.login();
-      LOG.debug("Assuming keytab is managed externally since logged in from"
-          + " subject.");
-      UserGroupInformation realUser = new UserGroupInformation(subject, true);
+
+      UserGroupInformation realUser =
+          new UserGroupInformation(subject, externalSubject);
       realUser.setLogin(login);
       realUser.setAuthenticationMethod(authenticationMethod);
       // If the HADOOP_PROXY_USER environment variable or property
@@ -959,11 +1022,23 @@ public class UserGroupInformation {
     return start + (long) ((end - start) * TICKET_RENEW_WINDOW);
   }
 
+  /**
+   * Should relogin if security is enabled using Kerberos, and
+   * the Subject is not owned by another UGI.
+   * @return true if this UGI should relogin
+   */
+  private boolean shouldRelogin() {
+    return isSecurityEnabled()
+        && user.getAuthenticationMethod() == AuthenticationMethod.KERBEROS
+        && !isLoginExternal;
+  }
+
   /**Spawn a thread to do periodic renewals of kerberos credentials*/
   private void spawnAutoRenewalThreadForUserCreds() {
-    if (!isSecurityEnabled()
-        || user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS
-        || isKeytab) {
+    if (getEnableRenewThreadCreationForTest()) {
+      LOG.warn("Spawning thread to auto renew user credential since " +
+          "enableRenewThreadCreationForTest was set to true.");
+    } else if (!shouldRelogin() || isKeytab) {
       return;
     }
 
@@ -1092,7 +1167,7 @@ public class UserGroupInformation {
       start = Time.now();
       login.login();
       metrics.loginSuccess.add(Time.now() - start);
-      loginUser = new UserGroupInformation(subject);
+      loginUser = new UserGroupInformation(subject, false);
       loginUser.setLogin(login);
       loginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
     } catch (LoginException le) {
@@ -1156,8 +1231,9 @@ public class UserGroupInformation {
   public synchronized void checkTGTAndReloginFromKeytab() throws IOException {
     if (!isSecurityEnabled()
         || user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS
-        || !isKeytab)
+        || !isKeytab) {
       return;
+    }
     KerberosTicket tgt = getTGT();
     if (tgt != null && !shouldRenewImmediatelyForTests &&
         Time.now() < getRefreshTime(tgt)) {
@@ -1210,9 +1286,7 @@ public class UserGroupInformation {
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
   public synchronized void reloginFromKeytab() throws IOException {
-    if (!isSecurityEnabled()
-        || user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS
-        || !isKeytab) {
+    if (!shouldRelogin() || !isKeytab) {
       return;
     }
 
@@ -1281,9 +1355,7 @@ public class UserGroupInformation {
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
   public synchronized void reloginFromTicketCache() throws IOException {
-    if (!isSecurityEnabled()
-        || user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS
-        || !isKrbTkt) {
+    if (!shouldRelogin() || !isKrbTkt) {
       return;
     }
     LoginContext login = getLogin();
@@ -1354,7 +1426,8 @@ public class UserGroupInformation {
       start = Time.now();
       login.login();
       metrics.loginSuccess.add(Time.now() - start);
-      UserGroupInformation newLoginUser = new UserGroupInformation(subject);
+      UserGroupInformation newLoginUser =
+          new UserGroupInformation(subject, false);
       newLoginUser.setLogin(login);
       newLoginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
       
@@ -1427,7 +1500,7 @@ public class UserGroupInformation {
     }
     Subject subject = new Subject();
     subject.getPrincipals().add(new User(user));
-    UserGroupInformation result = new UserGroupInformation(subject);
+    UserGroupInformation result = new UserGroupInformation(subject, false);
     result.setAuthenticationMethod(authMethod);
     return result;
   }
@@ -1504,7 +1577,7 @@ public class UserGroupInformation {
     Set<Principal> principals = subject.getPrincipals();
     principals.add(new User(user));
     principals.add(new RealUser(realUser));
-    UserGroupInformation result =new UserGroupInformation(subject);
+    UserGroupInformation result =new UserGroupInformation(subject, false);
     result.setAuthenticationMethod(AuthenticationMethod.PROXY);
     return result;
   }

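A minimal sketch of how a caller opts in to the new behavior, mirroring the usage visible in the tests further below; the subject is assumed to come from the caller's own JAAS login, and `ExternalSubjectLogin`/`loginWithExternalSubject` are hypothetical names:

```java
import javax.security.auth.Subject;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_TREAT_SUBJECT_EXTERNAL_KEY;

public class ExternalSubjectLogin {
  /** `subject` is assumed to be populated by the caller's own JAAS login. */
  static void loginWithExternalSubject(Subject subject) throws Exception {
    Configuration conf = new Configuration();
    SecurityUtil.setAuthenticationMethod(
        UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
    // Opt in: treat the subject as externally managed, so UGI spawns no
    // renewal thread and relogin calls become no-ops.
    conf.setBoolean(HADOOP_TREAT_SUBJECT_EXTERNAL_KEY, true);
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromSubject(subject);
    // From here on, the owner of `subject` must keep the Kerberos TGT fresh.
  }
}
```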
+ 23 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java

@@ -670,6 +670,26 @@ public abstract class ZKDelegationTokenSecretManager<TokenIdent extends Abstract
     return tokenInfo;
   }
 
+  /**
+   * This method synchronizes the state of a delegation token information in
+   * local cache with its actual value in Zookeeper.
+   *
+   * @param ident Identifier of the token
+   */
+  private synchronized void syncLocalCacheWithZk(TokenIdent ident) {
+    try {
+      DelegationTokenInformation tokenInfo = getTokenInfoFromZK(ident);
+      if (tokenInfo != null && !currentTokens.containsKey(ident)) {
+        currentTokens.put(ident, tokenInfo);
+      } else if (tokenInfo == null && currentTokens.containsKey(ident)) {
+        currentTokens.remove(ident);
+      }
+    } catch (IOException e) {
+      LOG.error("Error retrieving tokenInfo [" + ident.getSequenceNumber()
+          + "] from ZK", e);
+    }
+  }
+
   private DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident)
       throws IOException {
     return getTokenInfoFromZK(ident, false);
@@ -851,16 +871,9 @@ public abstract class ZKDelegationTokenSecretManager<TokenIdent extends Abstract
     DataInputStream in = new DataInputStream(buf);
     TokenIdent id = createIdentifier();
     id.readFields(in);
-    try {
-      if (!currentTokens.containsKey(id)) {
-        // See if token can be retrieved and placed in currentTokens
-        getTokenInfo(id);
-      }
-      return super.cancelToken(token, canceller);
-    } catch (Exception e) {
-      LOG.error("Exception while checking if token exist !!", e);
-      return id;
-    }
+
+    syncLocalCacheWithZk(id);
+    return super.cancelToken(token, canceller);
   }
 
   private void addOrUpdateToken(TokenIdent ident,

+ 3 - 0
hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md

@@ -263,11 +263,14 @@ Example:
 Note that the setting is not permanent and will be reset when the daemon is restarted.
 This command works by sending a HTTP/HTTPS request to the daemon's internal Jetty servlet, so it supports the following daemons:
 
+* Common
+    * key management server
 * HDFS
     * name node
     * secondary name node
     * data node
     * journal node
+    * HttpFS server
 * YARN
     * resource manager
     * node manager

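Since the KMS and HttpFS servers now run on the common HttpServer2 stack, the same `hadoop daemonlog` mechanism reaches them as well. For illustration only (the host name is a placeholder, 9600 is the default KMS HTTP port, and the logger name is just an example):

    hadoop daemonlog -getlevel kms.example.com:9600 org.apache.hadoop.crypto.key.kms.server.KMS
    hadoop daemonlog -setlevel kms.example.com:9600 org.apache.hadoop.crypto.key.kms.server.KMS DEBUG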
+ 1 - 0
hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md

@@ -101,6 +101,7 @@ In summary, first, provision the credentials into a provider then configure the
 |HDFS                 |DFSUtil leverages Configuration.getPassword method to use the credential provider API and/or fallback to the clear text value stored in ssl-server.xml.|TODO|
 |YARN                 |WebAppUtils uptakes the use of the credential provider API through the new method on Configuration called getPassword. This provides an alternative to storing the passwords in clear text within the ssl-server.xml file while maintaining backward compatibility.|TODO|
 |KMS                  |Uses HttpServer2.loadSSLConfiguration that leverages Configuration.getPassword to read SSL related credentials. They may be resolved through Credential Provider and/or from the clear text in the config when allowed.|[KMS](../../hadoop-kms/index.html)|
+|HttpFS               |Uses HttpServer2.loadSSLConfiguration that leverages Configuration.getPassword to read SSL related credentials. They may be resolved through Credential Provider and/or from the clear text in the  config when allowed.|[HttpFS Server Setup](../../hadoop-hdfs-httpfs/ServerSetup.html)|
 |AWS <br/> S3/S3A     |Uses Configuration.getPassword to get the S3 credentials. They may be resolved through the credential provider API or from the config for backward compatibility.|[AWS S3/S3A Usage](../../hadoop-aws/tools/hadoop-aws/index.html)|
 |Azure <br/> WASB     |Uses Configuration.getPassword to get the WASB credentials. They may be resolved through the credential provider API or from the config for backward compatibility.|[Azure WASB Usage](../../hadoop-azure/index.html)|
 |Azure <br/> ADLS     |Uses Configuration.getPassword to get the ADLS credentials. They may be resolved through the credential provider API or from the config for backward compatibility.|[Azure ADLS Usage](../../hadoop-azure-datalake/index.html)|

+ 1 - 1
hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md

@@ -667,7 +667,7 @@ stat
 
 Usage: `hadoop fs -stat [format] <path> ...`
 
-Print statistics about the file/directory at \<path\> in the specified format. Format accepts permissions in octal (%a) and symbolic (%A), filesize in blocks (%b), type (%F), group name of owner (%g), name (%n), block size (%o), replication (%r), user name of owner(%u), and modification date (%y, %Y). %y shows UTC date as "yyyy-MM-dd HH:mm:ss" and %Y shows milliseconds since January 1, 1970 UTC. If the format is not specified, %y is used by default.
+Print statistics about the file/directory at \<path\> in the specified format. Format accepts permissions in octal (%a) and symbolic (%A), filesize in bytes (%b), type (%F), group name of owner (%g), name (%n), block size (%o), replication (%r), user name of owner(%u), and modification date (%y, %Y). %y shows UTC date as "yyyy-MM-dd HH:mm:ss" and %Y shows milliseconds since January 1, 1970 UTC. If the format is not specified, %y is used by default.
 
 Example:
 
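For illustration (the path is hypothetical), an invocation consistent with the specifiers listed above, where `%b` now correctly denotes the file size in bytes:

    hadoop fs -stat "%F %a %u:%g %b %y %n" /user/alice/data.txt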

+ 6 - 0
hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md

@@ -322,6 +322,12 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
 | `RemoteBytesRead` | Number of bytes read by remote clients |
 | `RemoteBytesWritten` | Number of bytes written by remote clients |
 | `BPServiceActorInfo` | The information about a block pool service actor |
+| `EcReconstructionTasks` | Total number of erasure coding reconstruction tasks |
+| `EcFailedReconstructionTasks` | Total number of erasure coding failed reconstruction tasks |
+| `EcDecodingTimeNanos` | Total number of nanoseconds spent by decoding tasks |
+| `EcReconstructionBytesRead` | Total number of bytes read by erasure coding worker |
+| `EcReconstructionBytesWritten` | Total number of bytes written by erasure coding worker |
+| `EcReconstructionRemoteBytesRead` | Total number of bytes remote read by erasure coding worker |
 
 FsVolume
 --------

+ 1 - 1
hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md

@@ -196,7 +196,7 @@ AES offers the greatest cryptographic strength and the best performance. At this
 Data transfer between Web-console and clients are protected by using SSL(HTTPS). SSL configuration is recommended but not required to configure Hadoop security with Kerberos.
 
 To enable SSL for web console of HDFS daemons, set `dfs.http.policy` to either `HTTPS_ONLY` or `HTTP_AND_HTTPS` in hdfs-site.xml.
-Note that this does not affect KMS nor HttpFS, as they are implemented on top of Tomcat and do not respect this parameter. See [Hadoop KMS](../../hadoop-kms/index.html) and [Hadoop HDFS over HTTP - Server Setup](../../hadoop-hdfs-httpfs/ServerSetup.html) for instructions on enabling KMS over HTTPS and HttpFS over HTTPS, respectively.
+Note KMS and HttpFS do not respect this parameter. See [Hadoop KMS](../../hadoop-kms/index.html) and [Hadoop HDFS over HTTP - Server Setup](../../hadoop-hdfs-httpfs/ServerSetup.html) for instructions on enabling KMS over HTTPS and HttpFS over HTTPS, respectively.
 
 To enable SSL for web console of YARN daemons, set `yarn.http.policy` to `HTTPS_ONLY` in yarn-site.xml.
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md

@@ -31,7 +31,7 @@ Setting up tracing is quite simple, however it requires some very minor changes
 
 The tracing system works by collecting information in structs called 'Spans'.
 It is up to you to choose how you want to receive this information
-by using implementation of [SpanReceiver](http://htrace.incubator.apache.org/#Span_Receivers)
+by using implementation of [SpanReceiver](http://htrace.incubator.apache.org/developer_guide.html#SpanReceivers)
 interface bundled with HTrace or implementing it by yourself.
 
 [HTrace](http://htrace.incubator.apache.org/) provides options such as

+ 9 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java

@@ -138,6 +138,15 @@ public class TestKeyShell {
     assertTrue(outContent.toString().contains("key1 has been successfully " +
         "rolled."));
 
+    // jceks provider's invalidate is a no-op.
+    outContent.reset();
+    final String[] args3 =
+        {"invalidateCache", keyName, "-provider", jceksProvider};
+    rc = ks.run(args3);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString()
+        .contains("key1 has been successfully " + "invalidated."));
+
     deleteKey(ks, keyName);
 
     listOut = listKeys(ks, false);

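For reference, the shell invocation this test drives through KeyShell would look roughly as follows; the key name and provider URI are placeholders:

    hadoop key invalidateCache key1 -provider jceks://file/tmp/test.jceks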
+ 19 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java

@@ -26,6 +26,8 @@ import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -216,6 +218,23 @@ public class TestFileStatus {
         MTIME, ATIME, PERMISSION, OWNER, GROUP, symlink, PATH);  
     validateToString(fileStatus);
   }
+
+  @Test
+  public void testSerializable() throws Exception {
+    Path p = new Path("uqsf://ybpnyubfg:8020/sbb/one/onm");
+    FsPermission perm = FsPermission.getFileDefault();
+    FileStatus stat = new FileStatus(4344L, false, 4, 512L << 20, 12345678L,
+        87654321L, perm, "yak", "dingo", p);
+    ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
+    try (ObjectOutputStream oos = new ObjectOutputStream(baos)) {
+      oos.writeObject(stat);
+    }
+    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
+    try (ObjectInputStream ois = new ObjectInputStream(bais)) {
+      FileStatus deser = (FileStatus) ois.readObject();
+      assertEquals(stat, deser);
+    }
+  }
   
   /**
    * Validate the accessors for FileStatus.

+ 31 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java

@@ -715,6 +715,37 @@ public class TestActiveStandbyElector {
       }
     }
   }
+
+  /**
+   * Test that ACLs are set on parent zNode even if the node already exists.
+   */
+  @Test
+  public void testParentZNodeACLs() throws Exception {
+    KeeperException ke = new KeeperException(Code.NODEEXISTS) {
+      @Override
+      public Code code() {
+        return super.code();
+      }
+    };
+
+    Mockito.when(mockZK.create(Mockito.anyString(), Mockito.eq(new byte[]{}),
+        Mockito.anyListOf(ACL.class),
+        Mockito.eq(CreateMode.PERSISTENT))).thenThrow(ke);
+
+    elector.ensureParentZNode();
+
+    StringBuilder prefix = new StringBuilder();
+    for (String part : ZK_PARENT_NAME.split("/")) {
+      if (part.isEmpty()) continue;
+      prefix.append("/").append(part);
+      if (!"/".equals(prefix.toString())) {
+        Mockito.verify(mockZK).getACL(Mockito.eq(prefix.toString()),
+            Mockito.eq(new Stat()));
+        Mockito.verify(mockZK).setACL(Mockito.eq(prefix.toString()),
+            Mockito.eq(Ids.OPEN_ACL_UNSAFE), Mockito.anyInt());
+      }
+    }
+  }
   
   /**
    * Test for a bug encountered during development of HADOOP-8163:

+ 38 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -20,10 +20,12 @@ package org.apache.hadoop.http;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.http.HttpServer2.QuotingInputFilter.RequestQuoter;
 import org.apache.hadoop.http.resource.JerseyResource;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -644,4 +646,40 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     assertNotNull(conn.getHeaderField("Date"));
     assertEquals(conn.getHeaderField("Expires"), conn.getHeaderField("Date"));
   }
+
+  private static void stopHttpServer(HttpServer2 server) throws Exception {
+    if (server != null) {
+      server.stop();
+    }
+  }
+
+  @Test
+  public void testPortRanges() throws Exception {
+    Configuration conf = new Configuration();
+    int port =  ServerSocketUtil.waitForPort(49000, 60);
+    int endPort = 49500;
+    conf.set("abc", "49000-49500");
+    HttpServer2.Builder builder = new HttpServer2.Builder()
+        .setName("test").setConf(new Configuration()).setFindPort(false);
+    IntegerRanges ranges = conf.getRange("abc", "");
+    int startPort = 0;
+    if (ranges != null && !ranges.isEmpty()) {
+       startPort = ranges.getRangeStart();
+       builder.setPortRanges(ranges);
+    }
+    builder.addEndpoint(URI.create("http://localhost:" + startPort));
+    HttpServer2 myServer = builder.build();
+    HttpServer2 myServer2 = null;
+    try {
+      myServer.start();
+      assertEquals(port, myServer.getConnectorAddress(0).getPort());
+      myServer2 = builder.build();
+      myServer2.start();
+      assertTrue(myServer2.getConnectorAddress(0).getPort() > port &&
+          myServer2.getConnectorAddress(0).getPort() <= endPort);
+    } finally {
+      stopHttpServer(myServer);
+      stopHttpServer(myServer2);
+    }
+  }
 }

+ 14 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java

@@ -157,12 +157,25 @@ public class TestHttpServerWithSpengo {
         Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
       }
 
-      // userA cannot impersonate userC, it fails.
+      // userA cannot impersonate userC, but /stacks, /jmx and /conf
+      // do not require user authorization by default, so they
+      // can still be accessed.
       for (String servlet :
           new String[]{"stacks", "jmx", "conf"}){
         HttpURLConnection conn = authUrl
             .openConnection(new URL(serverURL + servlet + "?doAs=userC"),
                 token);
+        Assert.assertEquals(HttpURLConnection.HTTP_OK,
+            conn.getResponseCode());
+      }
+
+      // "/logs" and "/logLevel" require admin authorization;
+      // only userA has access.
+      for (String servlet :
+          new String[]{"logLevel", "logs"}) {
+        HttpURLConnection conn = authUrl
+            .openConnection(new URL(serverURL + servlet + "?doAs=userC"),
+                token);
         Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
             conn.getResponseCode());
       }

+ 56 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java

@@ -28,9 +28,12 @@ import javax.management.ObjectName;
 import java.lang.management.ManagementFactory;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.BlockingQueue;
-
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Test;
 import org.apache.hadoop.conf.Configuration;
 
 public class TestFairCallQueue extends TestCase {
@@ -43,6 +46,7 @@ public class TestFairCallQueue extends TestCase {
     when(ugi.getUserName()).thenReturn(id);
     when(mockCall.getUserGroupInformation()).thenReturn(ugi);
     when(mockCall.getPriorityLevel()).thenReturn(priority);
+    when(mockCall.toString()).thenReturn("id=" + id + " priority=" + priority);
 
     return mockCall;
   }
@@ -78,6 +82,57 @@ public class TestFairCallQueue extends TestCase {
     assertEquals(fairCallQueue.remainingCapacity(), 1025);
   }
 
+  @Test
+  public void testPrioritization() {
+    int numQueues = 10;
+    Configuration conf = new Configuration();
+    fcq = new FairCallQueue<Schedulable>(numQueues, numQueues, "ns", conf);
+
+    //Schedulable[] calls = new Schedulable[numCalls];
+    List<Schedulable> calls = new ArrayList<>();
+    for (int i=0; i < numQueues; i++) {
+      Schedulable call = mockCall("u", i);
+      calls.add(call);
+      fcq.add(call);
+    }
+
+    final AtomicInteger currentIndex = new AtomicInteger();
+    fcq.setMultiplexer(new RpcMultiplexer(){
+      @Override
+      public int getAndAdvanceCurrentIndex() {
+        return currentIndex.get();
+      }
+    });
+
+    // if there is no call at a given index, return the next highest
+    // priority call available.
+    //   v
+    //0123456789
+    currentIndex.set(3);
+    assertSame(calls.get(3), fcq.poll());
+    assertSame(calls.get(0), fcq.poll());
+    assertSame(calls.get(1), fcq.poll());
+    //      v
+    //--2-456789
+    currentIndex.set(6);
+    assertSame(calls.get(6), fcq.poll());
+    assertSame(calls.get(2), fcq.poll());
+    assertSame(calls.get(4), fcq.poll());
+    //        v
+    //-----5-789
+    currentIndex.set(8);
+    assertSame(calls.get(8), fcq.poll());
+    //         v
+    //-----5-7-9
+    currentIndex.set(9);
+    assertSame(calls.get(9), fcq.poll());
+    assertSame(calls.get(5), fcq.poll());
+    assertSame(calls.get(7), fcq.poll());
+    //----------
+    assertNull(fcq.poll());
+    assertNull(fcq.poll());
+  }
+
   //
   // Ensure that FairCallQueue properly implements BlockingQueue
   //

+ 119 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java

@@ -31,9 +31,12 @@ import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.Server.Call;
+import org.apache.hadoop.ipc.Server.Connection;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
+import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
@@ -64,6 +67,7 @@ import java.net.ConnectException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
+import java.nio.ByteBuffer;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -77,6 +81,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -85,6 +90,10 @@ import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.spy;
@@ -1353,6 +1362,116 @@ public class TestRPC extends TestRpcBase {
     }
   }
 
+  public static class FakeRequestClass extends RpcWritable {
+    static volatile IOException exception;
+    @Override
+    void writeTo(ResponseBuffer out) throws IOException {
+      throw new UnsupportedOperationException();
+    }
+    @Override
+    <T> T readFrom(ByteBuffer bb) throws IOException {
+      throw exception;
+    }
+  }
+
+  @SuppressWarnings("serial")
+  public static class TestReaderException extends IOException {
+    public TestReaderException(String msg) {
+      super(msg);
+    }
+    @Override
+    public boolean equals(Object t) {
+      return (t.getClass() == TestReaderException.class) &&
+             getMessage().equals(((TestReaderException)t).getMessage());
+    }
+  }
+
+  @Test (timeout=30000)
+  public void testReaderExceptions() throws Exception {
+    Server server = null;
+    TestRpcService proxy = null;
+
+    // will attempt to return this exception from a reader with and w/o
+    // the connection closing.
+    IOException expectedIOE = new TestReaderException("testing123");
+
+    @SuppressWarnings("serial")
+    IOException rseError = new RpcServerException("keepalive", expectedIOE){
+      @Override
+      public RpcStatusProto getRpcStatusProto() {
+        return RpcStatusProto.ERROR;
+      }
+    };
+    @SuppressWarnings("serial")
+    IOException rseFatal = new RpcServerException("disconnect", expectedIOE) {
+      @Override
+      public RpcStatusProto getRpcStatusProto() {
+        return RpcStatusProto.FATAL;
+      }
+    };
+
+    try {
+      RPC.Builder builder = newServerBuilder(conf)
+          .setQueueSizePerHandler(1).setNumHandlers(1).setVerbose(true);
+      server = setupTestServer(builder);
+      Whitebox.setInternalState(
+          server, "rpcRequestClass", FakeRequestClass.class);
+      MutableCounterLong authMetric =
+          (MutableCounterLong)Whitebox.getInternalState(
+              server.getRpcMetrics(), "rpcAuthorizationSuccesses");
+
+      proxy = getClient(addr, conf);
+      boolean isDisconnected = true;
+      Connection lastConn = null;
+      long expectedAuths = 0;
+
+      // fuzz the client.
+      for (int i=0; i < 128; i++) {
+        String reqName = "request[" + i + "]";
+        int r = ThreadLocalRandom.current().nextInt();
+        final boolean doDisconnect = r % 4 == 0;
+        LOG.info("TestDisconnect request[" + i + "] " +
+                 " shouldConnect=" + isDisconnected +
+                 " willDisconnect=" + doDisconnect);
+        if (isDisconnected) {
+          expectedAuths++;
+        }
+        try {
+          FakeRequestClass.exception = doDisconnect ? rseFatal : rseError;
+          proxy.ping(null, newEmptyRequest());
+          fail(reqName + " didn't fail");
+        } catch (ServiceException e) {
+          RemoteException re = (RemoteException)e.getCause();
+          assertEquals(reqName, expectedIOE, re.unwrapRemoteException());
+        }
+        // check authorizations to ensure new connection when expected,
+        // then conclusively determine if connections are disconnected
+        // correctly.
+        assertEquals(reqName, expectedAuths, authMetric.value());
+        if (!doDisconnect) {
+          // if it wasn't fatal, verify there's only one open connection.
+          Connection[] conns = server.getConnections();
+          assertEquals(reqName, 1, conns.length);
+          // verify whether the connection should have been reused.
+          if (isDisconnected) {
+            assertNotSame(reqName, lastConn, conns[0]);
+          } else {
+            assertSame(reqName, lastConn, conns[0]);
+          }
+          lastConn = conns[0];
+        } else if (lastConn != null) {
+          // avoid race condition in server where connection may not be
+          // fully removed yet.  just make sure it's marked for being closed.
+          // the open connection checks above ensure correct behavior.
+          assertTrue(reqName, lastConn.shouldClose());
+        }
+        isDisconnected = doDisconnect;
+      }
+    } finally {
+      stop(server, proxy);
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     new TestRPC().testCallsInternal(conf);
   }

+ 1 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java

@@ -75,6 +75,7 @@ public class TestUGIWithMiniKdc {
     SecurityUtil.setAuthenticationMethod(
         UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);
+    UserGroupInformation.setEnableRenewThreadCreationForTest(true);
 
     LoginContext loginContext = null;
     try {

+ 14 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java

@@ -61,6 +61,7 @@ import java.util.LinkedHashSet;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_TREAT_SUBJECT_EXTERNAL_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
@@ -1020,8 +1021,7 @@ public class TestUserGroupInformation {
     assertTrue(credsugiTokens.contains(token2));
   }
 
-  @Test
-  public void testCheckTGTAfterLoginFromSubject() throws Exception {
+  private void testCheckTGTAfterLoginFromSubjectHelper() throws Exception {
     // security on, default is remove default realm
     SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);
@@ -1031,6 +1031,7 @@ public class TestUserGroupInformation {
     KeyTab keytab = KeyTab.getInstance();
     subject.getPrivateCredentials().add(keytab);
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
     ugi.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
       public Void run() throws IOException {
@@ -1042,6 +1043,17 @@ public class TestUserGroupInformation {
     });
   }
 
+  @Test(expected = KerberosAuthException.class)
+  public void testCheckTGTAfterLoginFromSubject() throws Exception {
+    testCheckTGTAfterLoginFromSubjectHelper();
+  }
+
+  @Test
+  public void testCheckTGTAfterLoginFromSubjectFix() throws Exception {
+    conf.setBoolean(HADOOP_TREAT_SUBJECT_EXTERNAL_KEY, true);
+    testCheckTGTAfterLoginFromSubjectHelper();
+  }
+
   @Test
   public void testGetNextRetryTime() throws Exception {
     GenericTestUtils.setLogLevel(UserGroupInformation.LOG, Level.DEBUG);

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/resources/testConf.xml

@@ -867,7 +867,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*blocks \(%b\), type \(%F\), group name of owner \(%g\),( )*</expected-output>
+          <expected-output>^( |\t)*bytes \(%b\), type \(%F\), group name of owner \(%g\),( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>

+ 6 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/EagerKeyGeneratorKeyProviderCryptoExtension.java

@@ -183,4 +183,10 @@ public class EagerKeyGeneratorKeyProviderCryptoExtension
     getExtension().drain(name);
     return keyVersion;
   }
+
+  @Override
+  public void invalidateCache(String name) throws IOException {
+    super.invalidateCache(name);
+    getExtension().drain(name);
+  }
 }

+ 32 - 1
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java

@@ -61,7 +61,7 @@ import java.util.Map;
 public class KMS {
 
   public static enum KMSOp {
-    CREATE_KEY, DELETE_KEY, ROLL_NEW_VERSION,
+    CREATE_KEY, DELETE_KEY, ROLL_NEW_VERSION, INVALIDATE_CACHE,
     GET_KEYS, GET_KEYS_METADATA,
     GET_KEY_VERSIONS, GET_METADATA, GET_KEY_VERSION, GET_CURRENT_KEY,
     GENERATE_EEK, DECRYPT_EEK, REENCRYPT_EEK
@@ -252,6 +252,37 @@ public class KMS {
     }
   }
 
+  @POST
+  @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/"
+      + KMSRESTConstants.INVALIDATECACHE_RESOURCE)
+  public Response invalidateCache(@PathParam("name") final String name)
+      throws Exception {
+    try {
+      LOG.trace("Entering invalidateCache Method.");
+      KMSWebApp.getAdminCallsMeter().mark();
+      KMSClientProvider.checkNotEmpty(name, "name");
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.INVALIDATE_CACHE, name);
+      LOG.debug("Invalidating cache with key name {}.", name);
+
+      user.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          provider.invalidateCache(name);
+          provider.flush();
+          return null;
+        }
+      });
+
+      kmsAudit.ok(user, KMSOp.INVALIDATE_CACHE, name, "");
+      LOG.trace("Exiting invalidateCache for key name {}.", name);
+      return Response.ok().build();
+    } catch (Exception e) {
+      LOG.debug("Exception in invalidateCache for key name {}.", name, e);
+      throw e;
+    }
+  }
+
   @GET
   @Path(KMSRESTConstants.KEYS_METADATA_RESOURCE)
   @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)

+ 2 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java

@@ -48,6 +48,8 @@ public class KMSConfiguration {
   public static final int HTTP_PORT_DEFAULT = 9600;
   public static final String HTTP_HOST_KEY = "hadoop.kms.http.host";
   public static final String HTTP_HOST_DEFAULT = "0.0.0.0";
+  public static final String HTTP_ADMINS_KEY =
+      "hadoop.kms.http.administrators";
 
   // SSL properties
   public static final String SSL_ENABLED_KEY = "hadoop.kms.ssl.enabled";

+ 0 - 10
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java

@@ -34,9 +34,7 @@ import org.apache.hadoop.crypto.key.CachingKeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
-import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
@@ -144,14 +142,6 @@ public class KMSWebApp implements ServletContextListener {
 
       kmsAudit = new KMSAudit(kmsConf);
 
-      // this is required for the the JMXJsonServlet to work properly.
-      // the JMXJsonServlet is behind the authentication filter,
-      // thus the '*' ACL.
-      sce.getServletContext().setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE,
-          kmsConf);
-      sce.getServletContext().setAttribute(HttpServer2.ADMINS_ACL,
-          new AccessControlList(AccessControlList.WILDCARD_ACL_VALUE));
-
       // intializing the KeyProvider
       String providerString = kmsConf.get(KMSConfiguration.KEY_PROVIDER_URI);
       if (providerString == null) {

+ 3 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.ConfigurationWithLogging;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
@@ -84,6 +85,8 @@ public class KMSWebServer {
         .setConf(conf)
         .setSSLConf(sslConf)
         .authFilterConfigurationPrefix(KMSAuthenticationFilter.CONFIG_PREFIX)
+        .setACL(new AccessControlList(conf.get(
+            KMSConfiguration.HTTP_ADMINS_KEY, " ")))
         .addEndpoint(endpoint)
         .build();
   }

+ 11 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java

@@ -210,6 +210,17 @@ public class KeyAuthorizationKeyProvider extends KeyProviderCryptoExtension {
     }
   }
 
+  @Override
+  public void invalidateCache(String name) throws IOException {
+    writeLock.lock();
+    try {
+      doAccessCheck(name, KeyOpType.MANAGEMENT);
+      provider.invalidateCache(name);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
   @Override
   public void warmUpEncryptedKeys(String... names) throws IOException {
     readLock.lock();

+ 14 - 0
hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml

@@ -37,6 +37,20 @@
     </description>
   </property>
 
+  <property>
+    <name>hadoop.kms.http.administrators</name>
+    <value></value>
+    <description>ACL for the admins, this configuration is used to control
+      who can access the default KMS servlets. The value should be a comma
+      separated list of users and groups. The user list comes first and is
+      separated by a space followed by the group list,
+      e.g. "user1,user2 group1,group2". Both users and groups are optional,
+      so "user1", " group1", "", "user1 group1", "user1,user2 group1,group2"
+      are all valid (note the leading space in " group1"). '*' grants access
+      to all users and groups, e.g. '*', '* ' and ' *' are all valid.
+    </description>
+  </property>
+
   <property>
     <name>hadoop.kms.ssl.enabled</name>
     <value>false</value>

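For illustration only (the user and group names are made up), a non-empty ACL following the format described above could look like:

```xml
<property>
  <name>hadoop.kms.http.administrators</name>
  <value>kmsadmin,alice kms-admins</value>
</property>
```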
+ 49 - 3
hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm

@@ -103,7 +103,9 @@ This cache is used with the following 3 methods only, `getCurrentKey()` and `get
 
 For the `getCurrentKey()` method, cached entries are kept for a maximum of 30000 milliseconds regardless the number of times the key is being accessed (to avoid stale keys to be considered current).
 
-For the `getKeyVersion()` method, cached entries are kept with a default inactivity timeout of 600000 milliseconds (10 mins).
+For the `getKeyVersion()` and `getMetadata()` methods, cached entries are kept with a default inactivity timeout of 600000 milliseconds (10 mins).
+
+The cache is invalidated when the key is deleted by `deleteKey()`, or when `invalidateCache()` is called.
 
 These configurations can be changed via the following properties in the `etc/hadoop/kms-site.xml` configuration file:
 
@@ -841,6 +843,16 @@ $H4 Rollover Key
       "material"    : "<material>",    //base64, not present without GET ACL
     }
 
+$H4 Invalidate Cache of a Key
+
+*REQUEST:*
+
+    POST http://HOST:PORT/kms/v1/key/<key-name>/_invalidatecache
+
+*RESPONSE:*
+
+    200 OK
+
 $H4 Delete Key
 
 *REQUEST:*
@@ -1063,13 +1075,13 @@ configuration properties instead.
 
 Environment Variable     | Configuration Property       | Configuration File
 -------------------------|------------------------------|--------------------
+KMS_TEMP                 | hadoop.http.temp.dir         | kms-site.xml
 KMS_HTTP_PORT            | hadoop.kms.http.port         | kms-site.xml
 KMS_MAX_HTTP_HEADER_SIZE | hadoop.http.max.request.header.size and hadoop.http.max.response.header.size | kms-site.xml
 KMS_MAX_THREADS          | hadoop.http.max.threads      | kms-site.xml
 KMS_SSL_ENABLED          | hadoop.kms.ssl.enabled       | kms-site.xml
 KMS_SSL_KEYSTORE_FILE    | ssl.server.keystore.location | ssl-server.xml
 KMS_SSL_KEYSTORE_PASS    | ssl.server.keystore.password | ssl-server.xml
-KMS_TEMP                 | hadoop.http.temp.dir         | kms-site.xml
 
 $H3 Default HTTP Services
 
@@ -1080,4 +1092,38 @@ Name               | Description
 /logLevel          | Get or set log level per class
 /logs              | Display log files
 /stacks            | Display JVM stacks
-/static/index.html | The static home page
+/static/index.html | The static home page
+
+To control the access to servlet `/conf`, `/jmx`, `/logLevel`, `/logs`,
+and `/stacks`, configure the following properties in `kms-site.xml`:
+
+```xml
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>true</value>
+    <description>Is service-level authorization enabled?</description>
+  </property>
+
+  <property>
+    <name>hadoop.security.instrumentation.requires.admin</name>
+    <value>true</value>
+    <description>
+      Indicates if administrator ACLs are required to access
+      instrumentation servlets (JMX, METRICS, CONF, STACKS).
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.http.administrators</name>
+    <value></value>
+    <description>ACL for the admins, this configuration is used to control
+      who can access the default KMS servlets. The value should be a comma
+      separated list of users and groups. The user list comes first and is
+      separated by a space followed by the group list,
+      e.g. "user1,user2 group1,group2". Both users and groups are optional,
+      so "user1", " group1", "", "user1 group1", "user1,user2 group1,group2"
+      are all valid (note the leading space in " group1"). '*' grants access
+      to all users and groups, e.g. '*', '* ' and ' *' are all valid.
+    </description>
+  </property>
+```

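A hypothetical client-side call to the `_invalidatecache` endpoint documented above; the host, key name and authentication flags are placeholders, 9600 is the default KMS HTTP port, and `--negotiate -u :` assumes a SPNEGO/Kerberos-authenticated KMS:

    curl --negotiate -u : -X POST "http://kms.example.com:9600/kms/v1/key/key1/_invalidatecache"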
+ 72 - 17
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.crypto.key.kms.server;
 
 import com.google.common.base.Supplier;
+import com.google.common.cache.LoadingCache;
 import org.apache.curator.test.TestingServer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
@@ -31,7 +32,7 @@ import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
 import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.crypto.key.kms.ValueQueue;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.Credentials;
@@ -49,6 +50,8 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
+import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -79,11 +82,14 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.Callable;
+import java.util.concurrent.LinkedBlockingQueue;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.when;
 
 public class TestKMS {
   private static final Logger LOG = LoggerFactory.getLogger(TestKMS.class);
@@ -128,6 +134,11 @@ public class TestKMS {
         new KMSClientProvider[] { new KMSClientProvider(uri, conf) }, conf);
   }
 
+  private KMSClientProvider createKMSClientProvider(URI uri, Configuration conf)
+      throws IOException {
+    return new KMSClientProvider(uri, conf);
+  }
+
   protected <T> T runServer(String keystore, String password, File confDir,
       KMSCallable<T> callable) throws Exception {
     return runServer(-1, keystore, password, confDir, callable);
@@ -723,24 +734,68 @@ public class TestKMS {
 
         EncryptedKeyVersion ekv1 = kpce.generateEncryptedKey("k6");
         kpce.rollNewVersion("k6");
+        kpce.invalidateCache("k6");
+        EncryptedKeyVersion ekv2 = kpce.generateEncryptedKey("k6");
+        assertNotEquals("rollover did not generate a new key even after"
+            + " queue is drained", ekv1.getEncryptionKeyVersionName(),
+            ekv2.getEncryptionKeyVersionName());
+        return null;
+      }
+    });
+  }
 
-        /**
-         * due to the cache on the server side, client may get old keys.
-         * @see EagerKeyGeneratorKeyProviderCryptoExtension#rollNewVersion(String)
-         */
-        boolean rollSucceeded = false;
-        for (int i = 0; i <= EagerKeyGeneratorKeyProviderCryptoExtension
-            .KMS_KEY_CACHE_SIZE_DEFAULT + CommonConfigurationKeysPublic.
-            KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT; ++i) {
-          EncryptedKeyVersion ekv2 = kpce.generateEncryptedKey("k6");
-          if (!(ekv1.getEncryptionKeyVersionName()
-              .equals(ekv2.getEncryptionKeyVersionName()))) {
-            rollSucceeded = true;
-            break;
-          }
+  @Test
+  public void testKMSProviderCaching() throws Exception {
+    Configuration conf = new Configuration();
+    File confDir = getTestDir();
+    conf = createBaseKMSConf(confDir, conf);
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
+    writeConf(confDir, conf);
+
+    runServer(null, null, confDir, new KMSCallable<Void>() {
+      @Override
+      public Void call() throws Exception {
+        final String keyName = "k1";
+        final String mockVersionName = "mock";
+        final Configuration conf = new Configuration();
+        final URI uri = createKMSUri(getKMSUrl());
+        KMSClientProvider kmscp = createKMSClientProvider(uri, conf);
+
+        // get the reference to the internal cache, to test invalidation.
+        ValueQueue vq =
+            (ValueQueue) Whitebox.getInternalState(kmscp, "encKeyVersionQueue");
+        LoadingCache<String, LinkedBlockingQueue<EncryptedKeyVersion>> kq =
+            ((LoadingCache<String, LinkedBlockingQueue<EncryptedKeyVersion>>)
+                Whitebox.getInternalState(vq, "keyQueues"));
+        EncryptedKeyVersion mockEKV = Mockito.mock(EncryptedKeyVersion.class);
+        when(mockEKV.getEncryptionKeyName()).thenReturn(keyName);
+        when(mockEKV.getEncryptionKeyVersionName()).thenReturn(mockVersionName);
+
+        // createKey()
+        KeyProvider.Options options = new KeyProvider.Options(conf);
+        options.setCipher("AES/CTR/NoPadding");
+        options.setBitLength(128);
+        options.setDescription("l1");
+        KeyProvider.KeyVersion kv0 = kmscp.createKey(keyName, options);
+        assertNotNull(kv0.getVersionName());
+
+        assertEquals("Default key version name is incorrect.", "k1@0",
+            kmscp.generateEncryptedKey(keyName).getEncryptionKeyVersionName());
+
+        kmscp.invalidateCache(keyName);
+        kq.get(keyName).put(mockEKV);
+        assertEquals("Key version incorrect after invalidating cache + putting"
+                + " mock key.", mockVersionName,
+            kmscp.generateEncryptedKey(keyName).getEncryptionKeyVersionName());
+
+        // test new version is returned after invalidation.
+        for (int i = 0; i < 100; ++i) {
+          kq.get(keyName).put(mockEKV);
+          kmscp.invalidateCache(keyName);
+          assertEquals("Cache invalidation guarantee failed.", "k1@0",
+              kmscp.generateEncryptedKey(keyName)
+                  .getEncryptionKeyVersionName());
         }
-        Assert.assertTrue("rollover did not generate a new key even after"
-            + " queue is drained", rollSucceeded);
         return null;
       }
     });

+ 2 - 0
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java

@@ -104,6 +104,7 @@ public class TestKMSAudit {
     kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
     kmsAudit.ok(luser, KMSOp.DELETE_KEY, "k1", "testmsg");
     kmsAudit.ok(luser, KMSOp.ROLL_NEW_VERSION, "k1", "testmsg");
+    kmsAudit.ok(luser, KMSOp.INVALIDATE_CACHE, "k1", "testmsg");
     kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
     kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
     kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
@@ -122,6 +123,7 @@ public class TestKMSAudit {
             // Not aggregated !!
             + "OK\\[op=DELETE_KEY, key=k1, user=luser\\] testmsg"
             + "OK\\[op=ROLL_NEW_VERSION, key=k1, user=luser\\] testmsg"
+            + "OK\\[op=INVALIDATE_CACHE, key=k1, user=luser\\] testmsg"
             // Aggregated
             + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=6, interval=[^m]{1,4}ms\\] testmsg"
             + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"

+ 15 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java

@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.fs;
 
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -28,9 +32,10 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-public class HdfsBlockLocation extends BlockLocation {
+public class HdfsBlockLocation extends BlockLocation implements Serializable {
+  private static final long serialVersionUID = 0x7aecec92;
 
-  private final LocatedBlock block;
+  private transient LocatedBlock block;
 
   public HdfsBlockLocation(BlockLocation loc, LocatedBlock block) {
     // Initialize with data from passed in BlockLocation
@@ -41,4 +46,12 @@ public class HdfsBlockLocation extends BlockLocation {
   public LocatedBlock getLocatedBlock() {
     return block;
   }
+
+  private void readObject(ObjectInputStream ois)
+      throws IOException, ClassNotFoundException {
+    ois.defaultReadObject();
+    // LocatedBlock is not Serializable
+    block = null;
+  }
+
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -2233,7 +2233,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 
   /**
    * Requests the namenode to tell all datanodes to use a new, non-persistent
-   * bandwidth value for dfs.balance.bandwidthPerSec.
+   * bandwidth value for dfs.datanode.balance.bandwidthPerSec.
    * See {@link ClientProtocol#setBalancerBandwidth(long)}
    * for more details.
    *

+ 19 - 29
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -421,33 +421,36 @@ public class DFSInputStream extends FSInputStream
       }
       else {
         // search cached blocks first
-        int targetBlockIdx = locatedBlocks.findBlock(offset);
-        if (targetBlockIdx < 0) { // block is not cached
-          targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
-          // fetch more blocks
-          final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, offset);
-          assert (newBlocks != null) : "Could not find target position " + offset;
-          locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
-        }
-        blk = locatedBlocks.get(targetBlockIdx);
+        blk = fetchBlockAt(offset, 0, true);
       }
       return blk;
     }
   }
 
   /** Fetch a block from namenode and cache it */
-  protected void fetchBlockAt(long offset) throws IOException {
+  protected LocatedBlock fetchBlockAt(long offset) throws IOException {
+    return fetchBlockAt(offset, 0, false); // don't use cache
+  }
+
+  /** Fetch a block from namenode and cache it */
+  private LocatedBlock fetchBlockAt(long offset, long length, boolean useCache)
+      throws IOException {
     synchronized(infoLock) {
       int targetBlockIdx = locatedBlocks.findBlock(offset);
       if (targetBlockIdx < 0) { // block is not cached
         targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
+        useCache = false;
       }
-      // fetch blocks
-      final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, offset);
-      if (newBlocks == null) {
-        throw new IOException("Could not find target position " + offset);
+      if (!useCache) { // fetch blocks
+        final LocatedBlocks newBlocks = (length == 0)
+            ? dfsClient.getLocatedBlocks(src, offset)
+            : dfsClient.getLocatedBlocks(src, offset, length);
+        if (newBlocks == null || newBlocks.locatedBlockCount() == 0) {
+          throw new EOFException("Could not find target position " + offset);
+        }
+        locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
       }
-      locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
+      return locatedBlocks.get(targetBlockIdx);
     }
   }
 
@@ -502,28 +505,15 @@ public class DFSInputStream extends FSInputStream
       assert (locatedBlocks != null) : "locatedBlocks is null";
       List<LocatedBlock> blockRange = new ArrayList<>();
       // search cached blocks first
-      int blockIdx = locatedBlocks.findBlock(offset);
-      if (blockIdx < 0) { // block is not cached
-        blockIdx = LocatedBlocks.getInsertIndex(blockIdx);
-      }
       long remaining = length;
       long curOff = offset;
       while(remaining > 0) {
-        LocatedBlock blk = null;
-        if(blockIdx < locatedBlocks.locatedBlockCount())
-          blk = locatedBlocks.get(blockIdx);
-        if (blk == null || curOff < blk.getStartOffset()) {
-          LocatedBlocks newBlocks;
-          newBlocks = dfsClient.getLocatedBlocks(src, curOff, remaining);
-          locatedBlocks.insertRange(blockIdx, newBlocks.getLocatedBlocks());
-          continue;
-        }
+        LocatedBlock blk = fetchBlockAt(curOff, remaining, true);
         assert curOff >= blk.getStartOffset() : "Block not found";
         blockRange.add(blk);
         long bytesRead = blk.getStartOffset() + blk.getBlockSize() - curOff;
         remaining -= bytesRead;
         curOff += bytesRead;
-        blockIdx++;
       }
       return blockRange;
     }

+ 70 - 35
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java

@@ -142,8 +142,6 @@ class DataStreamer extends Daemon {
 
     /**
      * Record a connection exception.
-     * @param e
-     * @throws InvalidEncryptionKeyException
      */
     void recordFailure(final InvalidEncryptionKeyException e)
         throws InvalidEncryptionKeyException {
@@ -178,9 +176,8 @@ class DataStreamer extends Daemon {
         final StorageType[] targetStorageTypes,
         final Token<BlockTokenIdentifier> blockToken) throws IOException {
       //send the TRANSFER_BLOCK request
-      new Sender(out)
-          .transferBlock(block, blockToken, dfsClient.clientName, targets,
-              targetStorageTypes);
+      new Sender(out).transferBlock(block.getCurrentBlock(), blockToken,
+          dfsClient.clientName, targets, targetStorageTypes);
       out.flush();
       //ack
       BlockOpResponseProto transferResponse = BlockOpResponseProto
@@ -199,6 +196,42 @@ class DataStreamer extends Daemon {
     }
   }
 
+  static class BlockToWrite {
+    private ExtendedBlock currentBlock;
+
+    BlockToWrite(ExtendedBlock block) {
+      setCurrentBlock(block);
+    }
+
+    synchronized ExtendedBlock getCurrentBlock() {
+      return currentBlock == null ? null : new ExtendedBlock(currentBlock);
+    }
+
+    synchronized long getNumBytes() {
+      return currentBlock == null ? 0 : currentBlock.getNumBytes();
+    }
+
+    synchronized void setCurrentBlock(ExtendedBlock block) {
+      currentBlock = (block == null || block.getLocalBlock() == null) ?
+          null : new ExtendedBlock(block);
+    }
+
+    synchronized void setNumBytes(long numBytes) {
+      assert currentBlock != null;
+      currentBlock.setNumBytes(numBytes);
+    }
+
+    synchronized void setGenerationStamp(long generationStamp) {
+      assert currentBlock != null;
+      currentBlock.setGenerationStamp(generationStamp);
+    }
+
+    @Override
+    public synchronized String toString() {
+      return currentBlock == null ? "null" : currentBlock.toString();
+    }
+  }
+
   /**
    * Create a socket for a write pipeline
    *
@@ -440,7 +473,7 @@ class DataStreamer extends Daemon {
   }
 
   private volatile boolean streamerClosed = false;
-  protected volatile ExtendedBlock block; // its length is number of bytes acked
+  protected final BlockToWrite block; // its length is number of bytes acked
   protected Token<BlockTokenIdentifier> accessToken;
   private DataOutputStream blockStream;
   private DataInputStream blockReplyStream;
@@ -508,7 +541,7 @@ class DataStreamer extends Daemon {
                        ByteArrayManager byteArrayManage,
                        boolean isAppend, String[] favoredNodes,
                        EnumSet<AddBlockFlag> flags) {
-    this.block = block;
+    this.block = new BlockToWrite(block);
     this.dfsClient = dfsClient;
     this.src = src;
     this.progress = progress;
@@ -865,8 +898,9 @@ class DataStreamer extends Daemon {
       }
       long duration = Time.monotonicNow() - begin;
       if (duration > dfsclientSlowLogThresholdMs) {
-        LOG.warn("Slow waitForAckedSeqno took " + duration
-            + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms)");
+        LOG.warn("Slow waitForAckedSeqno took {}ms (threshold={}ms). File being"
+                + " written: {}, block: {}, Write pipeline datanodes: {}.",
+            duration, dfsclientSlowLogThresholdMs, src, block, nodes);
       }
     }
   }
@@ -1321,7 +1355,7 @@ class DataStreamer extends Daemon {
       LocatedBlock lb;
       //get a new datanode
       lb = dfsClient.namenode.getAdditionalDatanode(
-          src, stat.getFileId(), block, nodes, storageIDs,
+          src, stat.getFileId(), block.getCurrentBlock(), nodes, storageIDs,
           exclude.toArray(new DatanodeInfo[exclude.size()]),
           1, dfsClient.clientName);
       // a new node was allocated by the namenode. Update nodes.
@@ -1439,7 +1473,7 @@ class DataStreamer extends Daemon {
     } // while
 
     if (success) {
-      block = updatePipeline(newGS);
+      updatePipeline(newGS);
     }
   }
 
@@ -1535,21 +1569,22 @@ class DataStreamer extends Daemon {
   }
 
   private LocatedBlock updateBlockForPipeline() throws IOException {
-    return dfsClient.namenode.updateBlockForPipeline(block,
+    return dfsClient.namenode.updateBlockForPipeline(block.getCurrentBlock(),
         dfsClient.clientName);
   }
 
-  static ExtendedBlock newBlock(ExtendedBlock b, final long newGS) {
-    return new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(),
-        b.getNumBytes(), newGS);
+  void updateBlockGS(final long newGS) {
+    block.setGenerationStamp(newGS);
   }
 
   /** update pipeline at the namenode */
-  ExtendedBlock updatePipeline(long newGS) throws IOException {
-    final ExtendedBlock newBlock = newBlock(block, newGS);
-    dfsClient.namenode.updatePipeline(dfsClient.clientName, block, newBlock,
-        nodes, storageIDs);
-    return newBlock;
+  private void updatePipeline(long newGS) throws IOException {
+    final ExtendedBlock oldBlock = block.getCurrentBlock();
+    // the new GS has been propagated to all DN, it should be ok to update the
+    // local block state
+    updateBlockGS(newGS);
+    dfsClient.namenode.updatePipeline(dfsClient.clientName, oldBlock,
+        block.getCurrentBlock(), nodes, storageIDs);
   }
 
   DatanodeInfo[] getExcludedNodes() {
@@ -1569,31 +1604,29 @@ class DataStreamer extends Daemon {
     StorageType[] storageTypes;
     int count = dfsClient.getConf().getNumBlockWriteRetry();
     boolean success;
-    ExtendedBlock oldBlock = block;
+    final ExtendedBlock oldBlock = block.getCurrentBlock();
     do {
       errorState.resetInternalError();
       lastException.clear();
 
       DatanodeInfo[] excluded = getExcludedNodes();
-      block = oldBlock;
-      lb = locateFollowingBlock(excluded.length > 0 ? excluded : null);
-      block = lb.getBlock();
+      lb = locateFollowingBlock(
+          excluded.length > 0 ? excluded : null, oldBlock);
+      block.setCurrentBlock(lb.getBlock());
       block.setNumBytes(0);
       bytesSent = 0;
       accessToken = lb.getBlockToken();
       nodes = lb.getLocations();
       storageTypes = lb.getStorageTypes();
 
-      //
       // Connect to first DataNode in the list.
-      //
       success = createBlockOutputStream(nodes, storageTypes, 0L, false);
 
       if (!success) {
         LOG.warn("Abandoning " + block);
-        dfsClient.namenode.abandonBlock(block, stat.getFileId(), src,
-            dfsClient.clientName);
-        block = null;
+        dfsClient.namenode.abandonBlock(block.getCurrentBlock(),
+            stat.getFileId(), src, dfsClient.clientName);
+        block.setCurrentBlock(null);
         final DatanodeInfo badNode = nodes[errorState.getBadNodeIndex()];
         LOG.warn("Excluding datanode " + badNode);
         excludedNodes.put(badNode, badNode);
@@ -1654,7 +1687,7 @@ class DataStreamer extends Daemon {
 
         // We cannot change the block length in 'block' as it counts the number
         // of bytes ack'ed.
-        ExtendedBlock blockCopy = new ExtendedBlock(block);
+        ExtendedBlock blockCopy = block.getCurrentBlock();
         blockCopy.setNumBytes(stat.getBlockSize());
 
         boolean[] targetPinnings = getPinnings(nodes);
@@ -1764,9 +1797,9 @@ class DataStreamer extends Daemon {
     }
   }
 
-  private LocatedBlock locateFollowingBlock(DatanodeInfo[] excludedNodes)
-      throws IOException {
-    return DFSOutputStream.addBlock(excludedNodes, dfsClient, src, block,
+  private LocatedBlock locateFollowingBlock(DatanodeInfo[] excluded,
+      ExtendedBlock oldBlock) throws IOException {
+    return DFSOutputStream.addBlock(excluded, dfsClient, src, oldBlock,
         stat.getFileId(), favoredNodes, addBlockFlags);
   }
 
@@ -1810,7 +1843,7 @@ class DataStreamer extends Daemon {
    * @return the block this streamer is writing to
    */
   ExtendedBlock getBlock() {
-    return block;
+    return block.getCurrentBlock();
   }
 
   /**
@@ -2015,6 +2048,8 @@ class DataStreamer extends Daemon {
 
   @Override
   public String toString() {
-    return block == null? "block==null": "" + block.getLocalBlock();
+    final ExtendedBlock extendedBlock = block.getCurrentBlock();
+    return extendedBlock == null ?
+        "block==null" : "" + extendedBlock.getLocalBlock();
   }
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -1599,7 +1599,7 @@ public class DistributedFileSystem extends FileSystem {
 
   /**
    * Requests the namenode to tell all datanodes to use a new, non-persistent
-   * bandwidth value for dfs.balance.bandwidthPerSec.
+   * bandwidth value for dfs.datanode.balance.bandwidthPerSec.
    * The bandwidth parameter is the max number of bytes per second of network
    * bandwidth to be used by a datanode during balancing.
    *

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java

@@ -71,7 +71,7 @@ public class StripedDataStreamer extends DataStreamer {
 
   @Override
   protected void endBlock() {
-    coordinator.offerEndBlock(index, block);
+    coordinator.offerEndBlock(index, block.getCurrentBlock());
     super.endBlock();
   }
 
@@ -93,7 +93,7 @@ public class StripedDataStreamer extends DataStreamer {
   protected LocatedBlock nextBlockOutputStream() throws IOException {
     boolean success;
     LocatedBlock lb = getFollowingBlock();
-    block = lb.getBlock();
+    block.setCurrentBlock(lb.getBlock());
     block.setNumBytes(0);
     bytesSent = 0;
     accessToken = lb.getBlockToken();
@@ -105,7 +105,7 @@ public class StripedDataStreamer extends DataStreamer {
     success = createBlockOutputStream(nodes, storageTypes, 0L, false);
 
     if (!success) {
-      block = null;
+      block.setCurrentBlock(null);
       final DatanodeInfo badNode = nodes[getErrorState().getBadNodeIndex()];
       LOG.warn("Excluding datanode " + badNode);
       excludedNodes.put(badNode, badNode);
@@ -161,7 +161,7 @@ public class StripedDataStreamer extends DataStreamer {
         success = coordinator.takeStreamerUpdateResult(index);
         if (success) {
           // if all succeeded, update its block using the new GS
-          block = newBlock(block, newGS);
+          updateBlockGS(newGS);
         } else {
           // otherwise close the block stream and restart the recovery process
           closeStream();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -938,7 +938,7 @@ public interface ClientProtocol {
 
   /**
    * Tell all datanodes to use a new, non-persistent bandwidth value for
-   * dfs.balance.bandwidthPerSec.
+   * dfs.datanode.balance.bandwidthPerSec.
    *
   * @param bandwidth Balancer bandwidth in bytes per second for this datanode.
    * @throws IOException

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -51,7 +51,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private long lastUpdate;
   private long lastUpdateMonotonic;
   private int xceiverCount;
-  private String location = NetworkTopology.DEFAULT_RACK;
+  private volatile String location = NetworkTopology.DEFAULT_RACK;
   private String softwareVersion;
   private List<String> dependentHostNames = new LinkedList<>();
   private String upgradeDomain;
@@ -293,11 +293,11 @@ public class DatanodeInfo extends DatanodeID implements Node {
 
   /** network location */
   @Override
-  public synchronized String getNetworkLocation() {return location;}
+  public String getNetworkLocation() {return location;}
 
   /** Sets the network location */
   @Override
-  public synchronized void setNetworkLocation(String location) {
+  public void setNetworkLocation(String location) {
     this.location = NodeBase.normalize(location);
   }
 

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java

@@ -29,6 +29,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
  */
 @InterfaceAudience.Private
 public class FsPermissionExtension extends FsPermission {
+  private static final long serialVersionUID = 0x13c298a4;
+
   private final static short ACL_BIT = 1 << 12;
   private final static short ENCRYPTED_BIT = 1 << 13;
   private final boolean aclBit;

+ 51 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java

@@ -121,9 +121,11 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmI
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto;
@@ -584,6 +586,55 @@ public class PBHelperClient {
     return blockTokens;
   }
 
+  public static AccessModeProto convert(BlockTokenIdentifier.AccessMode aMode) {
+    switch (aMode) {
+    case READ: return AccessModeProto.READ;
+    case WRITE: return AccessModeProto.WRITE;
+    case COPY: return AccessModeProto.COPY;
+    case REPLACE: return AccessModeProto.REPLACE;
+    default:
+      throw new IllegalArgumentException("Unexpected AccessMode: " + aMode);
+    }
+  }
+
+  public static BlockTokenIdentifier.AccessMode convert(
+      AccessModeProto accessModeProto) {
+    switch (accessModeProto) {
+    case READ: return BlockTokenIdentifier.AccessMode.READ;
+    case WRITE: return BlockTokenIdentifier.AccessMode.WRITE;
+    case COPY: return BlockTokenIdentifier.AccessMode.COPY;
+    case REPLACE: return BlockTokenIdentifier.AccessMode.REPLACE;
+    default:
+      throw new IllegalArgumentException("Unexpected AccessModeProto: " +
+          accessModeProto);
+    }
+  }
+
+  public static BlockTokenSecretProto convert(
+      BlockTokenIdentifier blockTokenSecret) {
+    BlockTokenSecretProto.Builder builder =
+        BlockTokenSecretProto.newBuilder();
+    builder.setExpiryDate(blockTokenSecret.getExpiryDate());
+    builder.setKeyId(blockTokenSecret.getKeyId());
+    String userId = blockTokenSecret.getUserId();
+    if (userId != null) {
+      builder.setUserId(userId);
+    }
+
+    String blockPoolId = blockTokenSecret.getBlockPoolId();
+    if (blockPoolId != null) {
+      builder.setBlockPoolId(blockPoolId);
+    }
+
+    builder.setBlockId(blockTokenSecret.getBlockId());
+
+    for (BlockTokenIdentifier.AccessMode aMode :
+        blockTokenSecret.getAccessModes()) {
+      builder.addModes(convert(aMode));
+    }
+    return builder.build();
+  }
+
   static public DatanodeInfo convert(DatanodeInfoProto di) {
     if (di == null) {
       return null;

+ 86 - 3
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java

@@ -19,11 +19,16 @@
 package org.apache.hadoop.hdfs.security.token.block;
 
 import java.io.DataInput;
+import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.EnumSet;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -44,20 +49,22 @@ public class BlockTokenIdentifier extends TokenIdentifier {
   private String blockPoolId;
   private long blockId;
   private final EnumSet<AccessMode> modes;
+  private boolean useProto;
 
   private byte [] cache;
 
   public BlockTokenIdentifier() {
-    this(null, null, 0, EnumSet.noneOf(AccessMode.class));
+    this(null, null, 0, EnumSet.noneOf(AccessMode.class), false);
   }
 
   public BlockTokenIdentifier(String userId, String bpid, long blockId,
-      EnumSet<AccessMode> modes) {
+      EnumSet<AccessMode> modes, boolean useProto) {
     this.cache = null;
     this.userId = userId;
     this.blockPoolId = bpid;
     this.blockId = blockId;
     this.modes = modes == null ? EnumSet.noneOf(AccessMode.class) : modes;
+    this.useProto = useProto;
   }
 
   @Override
@@ -144,9 +151,45 @@ public class BlockTokenIdentifier extends TokenIdentifier {
         ^ (blockPoolId == null ? 0 : blockPoolId.hashCode());
   }
 
+  /**
+   * readFields peeks at the first byte of the DataInput and determines if it
+   * was written using WritableUtils ("Legacy") or Protobuf. We can do this
+   * because we know the first field is the Expiry date.
+   *
+   * In the case of the legacy buffer, the expiry date is a VLong, so the size
+   * (which should always be >1) is encoded in the first byte - which is
+   * always negative due to this encoding. However, a null BlockTokenIdentifier
+   * is sometimes written, so we also need to handle the case where the first
+   * byte is 0.
+   *
+   * In the case of protobuf, the first byte is a type tag for the expiry date,
+   * which is written as <code>(field_number << 3) | wire_type</code>.
+   * So as long as the field_number is less than 16 and positive, we know we
+   * have a protobuf-encoded token.
+   *
+   * @param in <code>DataInput</code> to deserialize this object from.
+   * @throws IOException
+   */
   @Override
   public void readFields(DataInput in) throws IOException {
     this.cache = null;
+
+    final DataInputStream dis = (DataInputStream)in;
+    if (!dis.markSupported()) {
+      throw new IOException("Could not peek first byte.");
+    }
+    dis.mark(1);
+    final byte firstByte = dis.readByte();
+    dis.reset();
+    if (firstByte <= 0) {
+      readFieldsLegacy(dis);
+    } else {
+      readFieldsProtobuf(dis);
+    }
+  }
+
+  @VisibleForTesting
+  void readFieldsLegacy(DataInput in) throws IOException {
     expiryDate = WritableUtils.readVLong(in);
     keyId = WritableUtils.readVInt(in);
     userId = WritableUtils.readString(in);
@@ -157,10 +200,44 @@ public class BlockTokenIdentifier extends TokenIdentifier {
     for (int i = 0; i < length; i++) {
       modes.add(WritableUtils.readEnum(in, AccessMode.class));
     }
+    useProto = false;
+  }
+
+  @VisibleForTesting
+  void readFieldsProtobuf(DataInput in) throws IOException {
+    BlockTokenSecretProto blockTokenSecretProto =
+        BlockTokenSecretProto.parseFrom((DataInputStream)in);
+    expiryDate = blockTokenSecretProto.getExpiryDate();
+    keyId = blockTokenSecretProto.getKeyId();
+    if (blockTokenSecretProto.hasUserId()) {
+      userId = blockTokenSecretProto.getUserId();
+    } else {
+      userId = null;
+    }
+    if (blockTokenSecretProto.hasBlockPoolId()) {
+      blockPoolId = blockTokenSecretProto.getBlockPoolId();
+    } else {
+      blockPoolId = null;
+    }
+    blockId = blockTokenSecretProto.getBlockId();
+    for (int i = 0; i < blockTokenSecretProto.getModesCount(); i++) {
+      AccessModeProto accessModeProto = blockTokenSecretProto.getModes(i);
+      modes.add(PBHelperClient.convert(accessModeProto));
+    }
+    useProto = true;
   }
 
   @Override
   public void write(DataOutput out) throws IOException {
+    if (useProto) {
+      writeProtobuf(out);
+    } else {
+      writeLegacy(out);
+    }
+  }
+
+  @VisibleForTesting
+  void writeLegacy(DataOutput out) throws IOException {
     WritableUtils.writeVLong(out, expiryDate);
     WritableUtils.writeVInt(out, keyId);
     WritableUtils.writeString(out, userId);
@@ -172,6 +249,12 @@ public class BlockTokenIdentifier extends TokenIdentifier {
     }
   }
 
+  @VisibleForTesting
+  void writeProtobuf(DataOutput out) throws IOException {
+    BlockTokenSecretProto secret = PBHelperClient.convert(this);
+    out.write(secret.toByteArray());
+  }
+
   @Override
   public byte[] getBytes() {
     if(cache == null) cache = super.getBytes();
@@ -186,4 +269,4 @@ public class BlockTokenIdentifier extends TokenIdentifier {
       return KIND_NAME;
     }
   }
-}
+}

+ 33 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto

@@ -514,3 +514,36 @@ message RollingUpgradeStatusProto {
 message StorageUuidsProto {
   repeated string storageUuids = 1;
 }
+
+/**
+ * File access permissions mode.
+ */
+enum AccessModeProto {
+    READ = 1;
+    WRITE = 2;
+    COPY = 3;
+    REPLACE = 4;
+}
+
+/**
+ * Secret information for the BlockKeyProto. This is not sent on the wire as
+ * such; it is used to pack a byte array that is then encrypted and put in
+ * BlockKeyProto.bytes.
+ * When adding further fields, make sure they are optional as they would
+ * otherwise not be backwards compatible.
+ *
+ * Note: As part of the migration from WritableUtils based tokens (aka "legacy")
+ * to Protocol Buffers, we use the first byte to determine the type. If the
+ * first byte is <=0 then it is a legacy token. This means that when using
+ * protobuf tokens, the first field sent must have a `field_number` less
+ * than 16 to make sure that the first byte is positive. Otherwise it could be
+ * parsed as a legacy token. See HDFS-11026 for more discussion.
+ */
+message BlockTokenSecretProto {
+  optional uint64 expiryDate = 1;
+  optional uint32 keyId = 2;
+  optional string userId = 3;
+  optional string blockPoolId = 4;
+  optional uint64 blockId = 5;
+  repeated AccessModeProto modes = 6;
+}
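
The first-byte rule described in the note above (and in `BlockTokenIdentifier#readFields`) can be checked with a few lines of plain Java. The sketch below is illustrative only and assumes nothing beyond `hadoop-common`'s `WritableUtils` on the classpath.

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.io.WritableUtils;

public class FirstByteCheck {
  public static void main(String[] args) throws Exception {
    // Legacy token: the first field (expiryDate) is a WritableUtils VLong.
    // Any multi-byte value starts with a negative length-marker byte.
    ByteArrayOutputStream legacy = new ByteArrayOutputStream();
    WritableUtils.writeVLong(new DataOutputStream(legacy),
        System.currentTimeMillis());
    System.out.println("legacy first byte   = " + legacy.toByteArray()[0]);

    // Protobuf token: the first byte is the tag of field 1 (expiryDate),
    // i.e. (field_number << 3) | wire_type = (1 << 3) | 0 = 8, always > 0.
    int protoTag = (1 << 3) | 0;
    System.out.println("protobuf first byte = " + protoTag);
  }
}
```

This is why `readFields` can branch on `firstByte <= 0`: a legacy token starts with a negative VLong marker (or 0 when a null identifier was written), while any protobuf field numbered below 16 yields a positive tag byte.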

+ 5 - 98
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -27,23 +27,18 @@
   </parent>
   <artifactId>hadoop-hdfs-httpfs</artifactId>
   <version>3.0.0-alpha3-SNAPSHOT</version>
-  <packaging>war</packaging>
+  <packaging>jar</packaging>
 
   <name>Apache Hadoop HttpFS</name>
   <description>Apache Hadoop HttpFS</description>
 
   <properties>
-    <httpfs.source.repository>REPO NOT AVAIL</httpfs.source.repository>
     <httpfs.source.repository>REPO NOT AVAIL</httpfs.source.repository>
     <httpfs.source.revision>REVISION NOT AVAIL</httpfs.source.revision>
     <maven.build.timestamp.format>yyyy-MM-dd'T'HH:mm:ssZ</maven.build.timestamp.format>
     <httpfs.build.timestamp>${maven.build.timestamp}</httpfs.build.timestamp>
-    <httpfs.tomcat.dist.dir>
-      ${project.build.directory}/${project.artifactId}-${project.version}/share/hadoop/httpfs/tomcat
-    </httpfs.tomcat.dist.dir>
     <kerberos.realm>LOCALHOST</kerberos.realm>
     <test.exclude.kerberos.test>**/TestHttpFSWithKerberos.java</test.exclude.kerberos.test>
-    <tomcat.download.url>http://archive.apache.org/dist/tomcat/tomcat-6/v${tomcat.version}/bin/apache-tomcat-${tomcat.version}.tar.gz</tomcat.download.url>
   </properties>
 
   <dependencies>
@@ -75,7 +70,6 @@
     <dependency>
       <groupId>javax.servlet</groupId>
       <artifactId>javax.servlet-api</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>
@@ -90,7 +84,10 @@
     <dependency>
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
-      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-webapp</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -373,23 +370,6 @@
           </execution>
         </executions>
       </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-war-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>default-war</id>
-            <phase>package</phase>
-            <goals>
-              <goal>war</goal>
-            </goals>
-            <configuration>
-              <warName>webhdfs</warName>
-              <webappDirectory>${project.build.directory}/webhdfs</webappDirectory>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>findbugs-maven-plugin</artifactId>
@@ -490,79 +470,6 @@
               </execution>
             </executions>
           </plugin>
-          <!-- Downloading Tomcat TAR.GZ, using downloads/ dir to avoid downloading over an over -->
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>dist</id>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-                <phase>package</phase>
-                <configuration>
-                  <target>
-                    <mkdir dir="downloads"/>
-                    <get
-                        src="${tomcat.download.url}"
-                        dest="downloads/apache-tomcat-${tomcat.version}.tar.gz" verbose="true" skipexisting="true"/>
-                    <delete dir="${project.build.directory}/tomcat.exp"/>
-                    <mkdir dir="${project.build.directory}/tomcat.exp"/>
-
-                    <!-- Using Unix script to preserve file permissions -->
-                    <echo file="${project.build.directory}/tomcat-untar.sh">
-                      cd "${project.build.directory}/tomcat.exp"
-                      gzip -cd ../../downloads/apache-tomcat-${tomcat.version}.tar.gz | tar xf -
-                    </echo>
-                    <exec executable="${shell-executable}" dir="${project.build.directory}" failonerror="true">
-                      <arg line="./tomcat-untar.sh"/>
-                    </exec>
-
-                    <move file="${project.build.directory}/tomcat.exp/apache-tomcat-${tomcat.version}"
-                          tofile="${httpfs.tomcat.dist.dir}"/>
-                    <delete dir="${project.build.directory}/tomcat.exp"/>
-                    <delete dir="${httpfs.tomcat.dist.dir}/webapps"/>
-                    <mkdir dir="${httpfs.tomcat.dist.dir}/webapps"/>
-                    <delete file="${httpfs.tomcat.dist.dir}/conf/server.xml"/>
-                    <copy file="${basedir}/src/main/tomcat/server.xml"
-                          toDir="${httpfs.tomcat.dist.dir}/conf"/>
-                    <delete file="${httpfs.tomcat.dist.dir}/conf/ssl-server.xml"/>
-                    <copy file="${basedir}/src/main/tomcat/ssl-server.xml.conf"
-                          toDir="${httpfs.tomcat.dist.dir}/conf"/>
-                    <delete file="${httpfs.tomcat.dist.dir}/conf/logging.properties"/>
-                    <copy file="${basedir}/src/main/tomcat/logging.properties"
-                          toDir="${httpfs.tomcat.dist.dir}/conf"/>
-                    <copy toDir="${httpfs.tomcat.dist.dir}/webapps/ROOT">
-                      <fileset dir="${basedir}/src/main/tomcat/ROOT"/>
-                    </copy>
-                    <copy toDir="${httpfs.tomcat.dist.dir}/webapps/webhdfs">
-                      <fileset dir="${project.build.directory}/webhdfs"/>
-                    </copy>
-                  </target>
-                </configuration>
-              </execution>
-              <execution>
-                <id>tar</id>
-                <phase>package</phase>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-                <configuration>
-                  <target if="tar">
-                    <!-- Using Unix script to preserve symlinks -->
-                    <echo file="${project.build.directory}/dist-maketar.sh">
-                      cd "${project.build.directory}"
-                      tar cf - ${project.artifactId}-${project.version} | gzip > ${project.artifactId}-${project.version}.tar.gz
-                    </echo>
-                    <exec executable="${shell-executable}" dir="${project.build.directory}" failonerror="true">
-                      <arg line="./dist-maketar.sh"/>
-                    </exec>
-                  </target>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
         </plugins>
       </build>
     </profile>

+ 15 - 32
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh

@@ -18,6 +18,14 @@
 # hadoop-env.sh is read prior to this file.
 #
 
+# HTTPFS config directory
+#
+# export HTTPFS_CONFIG=${HADOOP_CONF_DIR}
+
+# HTTPFS log directory
+#
+# export HTTPFS_LOG=${HADOOP_LOG_DIR}
+
 # HTTPFS temporary directory
 #
 # export HTTPFS_TEMP=${HADOOP_HOME}/temp
@@ -26,11 +34,7 @@
 #
 # export HTTPFS_HTTP_PORT=14000
 
-# The Admin port used by HTTPFS
-#
-# export HTTPFS_ADMIN_PORT=$((HTTPFS_HTTP_PORT + 1))
-
-# The maximum number of Tomcat handler threads
+# The maximum number of HTTP handler threads
 #
 # export HTTPFS_MAX_THREADS=1000
 
@@ -38,39 +42,18 @@
 #
 # export HTTPFS_HTTP_HOSTNAME=$(hostname -f)
 
-# The maximum size of Tomcat HTTP header
+# The maximum size of HTTP header
 #
 # export HTTPFS_MAX_HTTP_HEADER_SIZE=65536
 
+# Whether SSL is enabled
+#
+# export HTTPFS_SSL_ENABLED=false
+
 # The location of the SSL keystore if using SSL
 #
 # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore
 
-#
 # The password of the SSL keystore if using SSL
 #
-# export HTTPFS_SSL_KEYSTORE_PASS=password
-
-##
-## Tomcat specific settings
-##
-#
-# Location of tomcat
-#
-# export HTTPFS_CATALINA_HOME=${HADOOP_HOME}/share/hadoop/httpfs/tomcat
-
-# Java System properties for HTTPFS should be specified in this variable.
-# The java.library.path and hadoop.home.dir properties are automatically
-# configured.  In order to supplement java.library.path,
-# one should add to the JAVA_LIBRARY_PATH env var.
-#
-# export CATALINA_OPTS=
-
-# PID file
-#
-# export CATALINA_PID=${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-httpfs.pid
-
-# Output file
-#
-# export CATALINA_OUT=${HTTPFS_LOG}/hadoop-${HADOOP_IDENT_STRING}-httpfs-${HOSTNAME}.out
-
+# export HTTPFS_SSL_KEYSTORE_PASS=password

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java

@@ -43,7 +43,7 @@ import java.util.Properties;
 public class HttpFSAuthenticationFilter
     extends DelegationTokenAuthenticationFilter {
 
-  private static final String CONF_PREFIX = "httpfs.authentication.";
+  static final String CONF_PREFIX = "httpfs.authentication.";
 
   private static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + ".file";
 

+ 170 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java

@@ -0,0 +1,170 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.server;
+
+import static org.apache.hadoop.util.StringUtils.startupShutdownMessage;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URL;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.ConfigurationWithLogging;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The HttpFS web server.
+ */
+@InterfaceAudience.Private
+public class HttpFSServerWebServer {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HttpFSServerWebServer.class);
+
+  private static final String HTTPFS_DEFAULT_XML = "httpfs-default.xml";
+  private static final String HTTPFS_SITE_XML = "httpfs-site.xml";
+
+  // HTTP properties
+  static final String HTTP_PORT_KEY = "hadoop.httpfs.http.port";
+  private static final int HTTP_PORT_DEFAULT = 14000;
+  static final String HTTP_HOST_KEY = "hadoop.httpfs.http.host";
+  private static final String HTTP_HOST_DEFAULT = "0.0.0.0";
+
+  // SSL properties
+  private static final String SSL_ENABLED_KEY = "hadoop.httpfs.ssl.enabled";
+  private static final boolean SSL_ENABLED_DEFAULT = false;
+
+  private static final String HTTP_ADMINS_KEY =
+      "hadoop.httpfs.http.administrators";
+
+  private static final String NAME = "webhdfs";
+  private static final String SERVLET_PATH = "/webhdfs";
+
+  static {
+    Configuration.addDefaultResource(HTTPFS_DEFAULT_XML);
+    Configuration.addDefaultResource(HTTPFS_SITE_XML);
+  }
+
+  private final HttpServer2 httpServer;
+  private final String scheme;
+
+  HttpFSServerWebServer(Configuration conf, Configuration sslConf) throws
+      Exception {
+    // Override configuration with deprecated environment variables.
+    deprecateEnv("HTTPFS_TEMP", conf, HttpServer2.HTTP_TEMP_DIR_KEY,
+        HTTPFS_SITE_XML);
+    deprecateEnv("HTTPFS_HTTP_PORT", conf, HTTP_PORT_KEY,
+        HTTPFS_SITE_XML);
+    deprecateEnv("HTTPFS_MAX_THREADS", conf,
+        HttpServer2.HTTP_MAX_THREADS_KEY, HTTPFS_SITE_XML);
+    deprecateEnv("HTTPFS_MAX_HTTP_HEADER_SIZE", conf,
+        HttpServer2.HTTP_MAX_REQUEST_HEADER_SIZE_KEY, HTTPFS_SITE_XML);
+    deprecateEnv("HTTPFS_MAX_HTTP_HEADER_SIZE", conf,
+        HttpServer2.HTTP_MAX_RESPONSE_HEADER_SIZE_KEY, HTTPFS_SITE_XML);
+    deprecateEnv("HTTPFS_SSL_ENABLED", conf, SSL_ENABLED_KEY,
+        HTTPFS_SITE_XML);
+    deprecateEnv("HTTPFS_SSL_KEYSTORE_FILE", sslConf,
+        SSLFactory.SSL_SERVER_KEYSTORE_LOCATION,
+        SSLFactory.SSL_SERVER_CONF_DEFAULT);
+    deprecateEnv("HTTPFS_SSL_KEYSTORE_PASS", sslConf,
+        SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD,
+        SSLFactory.SSL_SERVER_CONF_DEFAULT);
+
+    boolean sslEnabled = conf.getBoolean(SSL_ENABLED_KEY,
+        SSL_ENABLED_DEFAULT);
+    scheme = sslEnabled ? HttpServer2.HTTPS_SCHEME : HttpServer2.HTTP_SCHEME;
+
+    String host = conf.get(HTTP_HOST_KEY, HTTP_HOST_DEFAULT);
+    int port = conf.getInt(HTTP_PORT_KEY, HTTP_PORT_DEFAULT);
+    URI endpoint = new URI(scheme, null, host, port, null, null, null);
+
+    httpServer = new HttpServer2.Builder()
+        .setName(NAME)
+        .setConf(conf)
+        .setSSLConf(sslConf)
+        .authFilterConfigurationPrefix(HttpFSAuthenticationFilter.CONF_PREFIX)
+        .setACL(new AccessControlList(conf.get(HTTP_ADMINS_KEY, " ")))
+        .addEndpoint(endpoint)
+        .build();
+  }
+
+  /**
+   * Load the deprecated environment variable into the configuration.
+   *
+   * @param varName the environment variable name
+   * @param conf the configuration
+   * @param propName the configuration property name
+   * @param confFile the configuration file name
+   */
+  private static void deprecateEnv(String varName, Configuration conf,
+                                   String propName, String confFile) {
+    String value = System.getenv(varName);
+    if (value == null) {
+      return;
+    }
+    String propValue = conf.get(propName);
+    LOG.warn("Environment variable {} = '{}' is deprecated and overriding"
+            + " property {} = '{}', please set the property in {} instead.",
+        varName, value, propName, propValue, confFile);
+    conf.set(propName, value, "environment variable " + varName);
+  }
+
+  public void start() throws IOException {
+    httpServer.start();
+  }
+
+  public void join() throws InterruptedException {
+    httpServer.join();
+  }
+
+  public void stop() throws Exception {
+    httpServer.stop();
+  }
+
+  public URL getUrl() {
+    InetSocketAddress addr = httpServer.getConnectorAddress(0);
+    if (null == addr) {
+      return null;
+    }
+    try {
+      return new URL(scheme, addr.getHostName(), addr.getPort(),
+          SERVLET_PATH);
+    } catch (MalformedURLException ex) {
+      throw new RuntimeException("It should never happen: " + ex.getMessage(),
+          ex);
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    startupShutdownMessage(HttpFSServerWebServer.class, args, LOG);
+    Configuration conf = new ConfigurationWithLogging(
+        new Configuration(true));
+    Configuration sslConf = new ConfigurationWithLogging(
+        SSLFactory.readSSLConfiguration(conf, SSLFactory.Mode.SERVER));
+    HttpFSServerWebServer webServer =
+        new HttpFSServerWebServer(conf, sslConf);
+    webServer.start();
+    webServer.join();
+  }
+}

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/MDCFilter.java

@@ -84,7 +84,9 @@ public class MDCFilter implements Filter {
         MDC.put("user", user);
       }
       MDC.put("method", ((HttpServletRequest) request).getMethod());
-      MDC.put("path", ((HttpServletRequest) request).getPathInfo());
+      if (((HttpServletRequest) request).getPathInfo() != null) {
+        MDC.put("path", ((HttpServletRequest) request).getPathInfo());
+      }
       chain.doFilter(request, response);
     } finally {
       MDC.clear();

+ 0 - 76
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/httpfs-config.sh

@@ -1,76 +0,0 @@
-#!/usr/bin/env bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
-
-function hadoop_subproject_init
-{
-  local this
-  local binparent
-  local varlist
-
-  if [[ -z "${HADOOP_HTTPFS_ENV_PROCESSED}" ]]; then
-    if [[ -e "${HADOOP_CONF_DIR}/httpfs-env.sh" ]]; then
-      . "${HADOOP_CONF_DIR}/httpfs-env.sh"
-      export HADOOP_HTTPFS_ENV_PROCESSED=true
-    fi
-  fi
-
-  export HADOOP_CATALINA_PREFIX=httpfs
-
-  export HADOOP_CATALINA_TEMP="${HTTPFS_TEMP:-${HADOOP_HOME}/temp}"
-
-  hadoop_deprecate_envvar HTTPFS_CONFIG HADOOP_CONF_DIR
-
-  hadoop_deprecate_envvar HTTPFS_LOG HADOOP_LOG_DIR
-
-  export HADOOP_CATALINA_CONFIG="${HADOOP_CONF_DIR}"
-  export HADOOP_CATALINA_LOG="${HADOOP_LOG_DIR}"
-
-  export HTTPFS_HTTP_HOSTNAME=${HTTPFS_HTTP_HOSTNAME:-$(hostname -f)}
-
-  export HADOOP_CATALINA_HTTP_PORT="${HTTPFS_HTTP_PORT:-14000}"
-  export HADOOP_CATALINA_ADMIN_PORT="${HTTPFS_ADMIN_PORT:-$((HADOOP_CATALINA_HTTP_PORT+1))}"
-  export HADOOP_CATALINA_MAX_THREADS="${HTTPFS_MAX_THREADS:-150}"
-  export HADOOP_CATALINA_MAX_HTTP_HEADER_SIZE="${HTTPFS_MAX_HTTP_HEADER_SIZE:-65536}"
-
-  export HTTPFS_SSL_ENABLED=${HTTPFS_SSL_ENABLED:-false}
-
-  export HADOOP_CATALINA_SSL_KEYSTORE_FILE="${HTTPFS_SSL_KEYSTORE_FILE:-${HOME}/.keystore}"
-
-  export CATALINA_BASE="${CATALINA_BASE:-${HADOOP_HOME}/share/hadoop/httpfs/tomcat}"
-  export HADOOP_CATALINA_HOME="${HTTPFS_CATALINA_HOME:-${CATALINA_BASE}}"
-
-  export CATALINA_OUT="${CATALINA_OUT:-${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-httpfs-${HOSTNAME}.out}"
-
-  export CATALINA_PID="${CATALINA_PID:-${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-httpfs.pid}"
-
-  if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then
-    varlist=$(env | egrep '(^HTTPFS|^CATALINA)' | cut -f1 -d= | grep -v _PASS)
-    for i in ${varlist}; do
-      hadoop_debug "Setting ${i} to ${!i}"
-    done
-  fi
-}
-
-if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
-   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
-  . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
-elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
-  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
-elif [[ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]]; then
-  . "${HADOOP_HOME}/libexec/hadoop-config.sh"
-else
-  echo "ERROR: Hadoop common not found." 2>&1
-  exit 1
-fi

+ 67 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh

@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [[ "${HADOOP_SHELL_EXECNAME}" = hdfs ]]; then
+  hadoop_add_subcommand "httpfs" "run HttpFS server, the HDFS HTTP Gateway"
+fi
+
+## @description  Command handler for httpfs subcommand
+## @audience     private
+## @stability    stable
+## @replaceable  no
+function hdfs_subcommand_httpfs
+{
+  if [[ -f "${HADOOP_CONF_DIR}/httpfs-env.sh" ]]; then
+    # shellcheck disable=SC1090
+    . "${HADOOP_CONF_DIR}/httpfs-env.sh"
+  fi
+
+  hadoop_deprecate_envvar HTTPFS_CONFIG HADOOP_CONF_DIR
+  hadoop_deprecate_envvar HTTPFS_LOG HADOOP_LOG_DIR
+
+  hadoop_using_envvar HTTPFS_HTTP_HOSTNAME
+  hadoop_using_envvar HTTPFS_HTTP_PORT
+  hadoop_using_envvar HTTPFS_MAX_HTTP_HEADER_SIZE
+  hadoop_using_envvar HTTPFS_MAX_THREADS
+  hadoop_using_envvar HTTPFS_SSL_ENABLED
+  hadoop_using_envvar HTTPFS_SSL_KEYSTORE_FILE
+  hadoop_using_envvar HTTPFS_TEMP
+
+  # shellcheck disable=SC2034
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION=true
+  # shellcheck disable=SC2034
+  HADOOP_CLASSNAME=org.apache.hadoop.fs.http.server.HttpFSServerWebServer
+  # shellcheck disable=SC2034
+
+  hadoop_add_param HADOOP_OPTS "-Dhttpfs.home.dir" \
+    "-Dhttpfs.home.dir=${HADOOP_HOME}"
+  hadoop_add_param HADOOP_OPTS "-Dhttpfs.config.dir" \
+    "-Dhttpfs.config.dir=${HTTPFS_CONFIG:-${HADOOP_CONF_DIR}}"
+  hadoop_add_param HADOOP_OPTS "-Dhttpfs.log.dir" \
+    "-Dhttpfs.log.dir=${HTTPFS_LOG:-${HADOOP_LOG_DIR}}"
+  hadoop_add_param HADOOP_OPTS "-Dhttpfs.http.hostname" \
+    "-Dhttpfs.http.hostname=${HTTPFS_HOST_NAME:-$(hostname -f)}"
+  if [[ -n "${HTTPFS_SSL_ENABLED}" ]]; then
+    hadoop_add_param HADOOP_OPTS "-Dhttpfs.ssl.enabled" \
+      "-Dhttpfs.ssl.enabled=${HTTPFS_SSL_ENABLED}"
+  fi
+
+  if [[ "${HADOOP_DAEMON_MODE}" == "default" ]] ||
+     [[ "${HADOOP_DAEMON_MODE}" == "start" ]]; then
+    hadoop_mkdir "${HTTPFS_TEMP:-${HADOOP_HOME}/temp}"
+  fi
+}

+ 72 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml

@@ -15,6 +15,78 @@
 -->
 <configuration>
 
+  <property>
+    <name>hadoop.httpfs.http.port</name>
+    <value>14000</value>
+    <description>
+      The HTTP port for HttpFS REST API.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.httpfs.http.host</name>
+    <value>0.0.0.0</value>
+    <description>
+      The bind host for HttpFS REST API.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.httpfs.http.administrators</name>
+    <value></value>
+    <description>ACL for the admins, this configuration is used to control
+      who can access the default servlets for HttpFS server. The value
+      should be a comma separated list of users and groups. The user list
+      comes first and is separated by a space followed by the group list,
+      e.g. "user1,user2 group1,group2". Both users and groups are optional,
+      so "user1", " group1", "", "user1 group1", "user1,user2 group1,group2"
+      are all valid (note the leading space in " group1"). '*' grants access
+      to all users and groups, e.g. '*', '* ' and ' *' are all valid.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.httpfs.ssl.enabled</name>
+    <value>false</value>
+    <description>
+      Whether SSL is enabled. Default is false, i.e. disabled.
+    </description>
+  </property>
+
+  <!-- HTTP properties -->
+
+  <property>
+    <name>hadoop.http.max.threads</name>
+    <value>1000</value>
+    <description>
+      The maximum number of threads.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.http.max.request.header.size</name>
+    <value>65536</value>
+    <description>
+      The maximum HTTP request header size.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.http.max.response.header.size</name>
+    <value>65536</value>
+    <description>
+      The maximum HTTP response header size.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.http.temp.dir</name>
+    <value>${hadoop.tmp.dir}/httpfs</value>
+    <description>
+      HttpFS temp directory.
+    </description>
+  </property>
+
   <!-- HttpFSServer Server -->
 
   <property>
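
The properties above replace the Tomcat-era environment variables with plain Hadoop configuration keys. A minimal sketch of reading them through the standard `org.apache.hadoop.conf.Configuration` API follows; the key names and defaults are taken from the file above, while the actual server-side wiring lives in `HttpFSServerWebServer` and is only approximated here.

```java
// Minimal sketch, not part of this commit: read the new HttpFS keys with the
// standard Configuration API. Key names and defaults match httpfs-default.xml
// above; in a real deployment httpfs-site.xml on the classpath overrides them.
import org.apache.hadoop.conf.Configuration;

public class HttpFSConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    String host = conf.get("hadoop.httpfs.http.host", "0.0.0.0");
    int port = conf.getInt("hadoop.httpfs.http.port", 14000);
    boolean sslEnabled = conf.getBoolean("hadoop.httpfs.ssl.enabled", false);
    int maxThreads = conf.getInt("hadoop.http.max.threads", 1000);

    System.out.printf("HttpFS would bind %s:%d (ssl=%b, maxThreads=%d)%n",
        host, port, sslEnabled, maxThreads);
  }
}
```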

+ 17 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/index.html → hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/webapps/static/index.html

@@ -15,7 +15,22 @@
 
 -->
 <html>
+<head>
+    <title>Hadoop HttpFS Server</title>
+</head>
 <body>
-<b>HttpFs service</b>, service base URL at /webhdfs/v1.
+<h1>Hadoop HttpFS Server</h1>
+<ul>
+    <li>HttpFS Server service base URL at <b>/webhdfs/v1/</b></li>
+        <ul>
+            <li><a href="/webhdfs/v1/?op=LISTSTATUS">
+                /webhdfs/v1/?op=LISTSTATUS</a> to list root directory</li>
+        </ul>
+    <li><a href="/conf">HttpFS configuration properties</a></li>
+    <li><a href="/jmx">HttpFS JMX</a></li>
+    <li><a href="/logLevel">HttpFS log level</a></li>
+    <li><a href="/logs">HttpFS log files</a></li>
+    <li><a href="/stacks">HttpFS stacks</a></li>
+</ul>
 </body>
-</html>
+</html>

+ 98 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/webapps/webhdfs/WEB-INF/web.xml

@@ -0,0 +1,98 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<web-app version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee">
+
+  <listener>
+    <listener-class>org.apache.hadoop.fs.http.server.HttpFSServerWebApp</listener-class>
+  </listener>
+
+  <servlet>
+    <servlet-name>webservices-driver</servlet-name>
+    <servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
+    <init-param>
+      <param-name>com.sun.jersey.config.property.packages</param-name>
+      <param-value>org.apache.hadoop.fs.http.server,org.apache.hadoop.lib.wsrs</param-value>
+    </init-param>
+
+    <!-- Enables detailed Jersey request/response logging -->
+    <!--
+            <init-param>
+                <param-name>com.sun.jersey.spi.container.ContainerRequestFilters</param-name>
+                <param-value>com.sun.jersey.api.container.filter.LoggingFilter</param-value>
+            </init-param>
+            <init-param>
+                <param-name>com.sun.jersey.spi.container.ContainerResponseFilters</param-name>
+                <param-value>com.sun.jersey.api.container.filter.LoggingFilter</param-value>
+            </init-param>
+    -->
+    <load-on-startup>1</load-on-startup>
+  </servlet>
+
+  <servlet-mapping>
+    <servlet-name>webservices-driver</servlet-name>
+    <url-pattern>/webhdfs/*</url-pattern>
+  </servlet-mapping>
+
+  <filter>
+    <filter-name>authFilter</filter-name>
+    <filter-class>org.apache.hadoop.fs.http.server.HttpFSAuthenticationFilter</filter-class>
+  </filter>
+
+  <filter>
+    <filter-name>MDCFilter</filter-name>
+    <filter-class>org.apache.hadoop.lib.servlet.MDCFilter</filter-class>
+  </filter>
+
+  <filter>
+    <filter-name>hostnameFilter</filter-name>
+    <filter-class>org.apache.hadoop.lib.servlet.HostnameFilter</filter-class>
+  </filter>
+
+  <filter>
+    <filter-name>checkUploadContentType</filter-name>
+    <filter-class>org.apache.hadoop.fs.http.server.CheckUploadContentTypeFilter</filter-class>
+  </filter>
+
+  <filter>
+    <filter-name>fsReleaseFilter</filter-name>
+    <filter-class>org.apache.hadoop.fs.http.server.HttpFSReleaseFilter</filter-class>
+  </filter>
+
+  <filter-mapping>
+    <filter-name>authFilter</filter-name>
+    <url-pattern>*</url-pattern>
+  </filter-mapping>
+
+  <filter-mapping>
+    <filter-name>MDCFilter</filter-name>
+    <url-pattern>*</url-pattern>
+  </filter-mapping>
+
+  <filter-mapping>
+    <filter-name>hostnameFilter</filter-name>
+    <url-pattern>*</url-pattern>
+  </filter-mapping>
+
+  <filter-mapping>
+    <filter-name>checkUploadContentType</filter-name>
+    <url-pattern>*</url-pattern>
+  </filter-mapping>
+
+  <filter-mapping>
+    <filter-name>fsReleaseFilter</filter-name>
+    <url-pattern>*</url-pattern>
+  </filter-mapping>
+
+</web-app>
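
The descriptor wires the Jersey `webservices-driver` servlet plus a chain of filters (authentication, MDC, hostname, upload content-type check, FileSystem release) across every request. For readers unfamiliar with the Servlet 2.4 API it declares, here is a purely hypothetical filter skeleton showing the shape such a filter takes; the class name and its timing behavior are illustrative and do not appear in the commit.

```java
// Hypothetical example only: the general shape of one filter in the chain
// declared above, using the javax.servlet Filter API.
import java.io.IOException;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;

public class ExampleRequestTimingFilter implements Filter {

  @Override
  public void init(FilterConfig filterConfig) throws ServletException {
    // One-time setup; the real HttpFS filters read their settings here.
  }

  @Override
  public void doFilter(ServletRequest request, ServletResponse response,
      FilterChain chain) throws IOException, ServletException {
    long start = System.currentTimeMillis();
    try {
      chain.doFilter(request, response);   // hand off to the next filter/servlet
    } finally {
      long elapsed = System.currentTimeMillis() - start;
      // The real filters do auth, MDC setup, etc.; this one just times requests.
      System.out.println("request took " + elapsed + " ms");
    }
  }

  @Override
  public void destroy() {
    // Release any resources acquired in init().
  }
}
```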

+ 38 - 88
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh

@@ -13,102 +13,52 @@
 #  limitations under the License.
 #
 
-MYNAME="${BASH_SOURCE-$0}"
+MYNAME="${0##*/}"
 
-function hadoop_usage
+## @description  Print usage
+## @audience     private
+## @stability    stable
+## @replaceable  no
+function print_usage
 {
-  hadoop_add_subcommand "run" "Start HttpFS in the current window"
-  hadoop_add_subcommand "run -security" "Start in the current window with security manager"
-  hadoop_add_subcommand "start" "Start HttpFS in a separate window"
-  hadoop_add_subcommand "start -security" "Start in a separate window with security manager"
-  hadoop_add_subcommand "status" "Return the LSB compliant status"
-  hadoop_add_subcommand "stop" "Stop HttpFS, waiting up to 5 seconds for the process to end"
-  hadoop_add_subcommand "stop n" "Stop HttpFS, waiting up to n seconds for the process to end"
-  hadoop_add_subcommand "stop -force" "Stop HttpFS, wait up to 5 seconds and then use kill -KILL if still running"
-  hadoop_add_subcommand "stop n -force" "Stop HttpFS, wait up to n seconds and then use kill -KILL if still running"
-  hadoop_generate_usage "${MYNAME}" false
+  cat <<EOF
+Usage: ${MYNAME} run|start|status|stop
+commands:
+  run     Run HttpFS server, the HDFS HTTP Gateway
+  start   Start HttpFS server as a daemon
+  status  Return the status of the HttpFS server daemon
+  stop    Stop the HttpFS server daemon
+EOF
 }
 
-# let's locate libexec...
-if [[ -n "${HADOOP_HOME}" ]]; then
-  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
-else
-  bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
-  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
-fi
-
-HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
-# shellcheck disable=SC2034
-HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/httpfs-config.sh" ]]; then
-  . "${HADOOP_LIBEXEC_DIR}/httpfs-config.sh"
-else
-  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/httpfs-config.sh." 2>&1
-  exit 1
-fi
-
-# The Java System property 'httpfs.http.port' it is not used by HttpFS,
-# it is used in Tomcat's server.xml configuration file
-#
-
-# Mask the trustStorePassword
-# shellcheck disable=SC2086
-CATALINA_OPTS_DISP="$(echo ${CATALINA_OPTS} | sed -e 's/trustStorePassword=[^ ]*/trustStorePassword=***/')"
-
-hadoop_debug "Using   CATALINA_OPTS:       ${CATALINA_OPTS_DISP}"
-
-# We're using hadoop-common, so set up some stuff it might need:
-hadoop_finalize
-
-hadoop_verify_logdir
+echo "WARNING: ${MYNAME} is deprecated," \
+  "please use 'hdfs [--daemon start|status|stop] httpfs'." >&2
 
 if [[ $# = 0 ]]; then
-  case "${HADOOP_DAEMON_MODE}" in
-    status)
-      hadoop_status_daemon "${CATALINA_PID}"
-      exit
-    ;;
-    start)
-      set -- "start"
-    ;;
-    stop)
-      set -- "stop"
-    ;;
-  esac
+  print_usage
+  exit
 fi
 
-hadoop_finalize_catalina_opts
-export CATALINA_OPTS
-
-# A bug in catalina.sh script does not use CATALINA_OPTS for stopping the server
-#
-if [[ "${1}" = "stop" ]]; then
-  export JAVA_OPTS=${CATALINA_OPTS}
-fi
+case $1 in
+  run)
+    args=("httpfs")
+  ;;
+  start|stop|status)
+    args=("--daemon" "$1" "httpfs")
+  ;;
+  *)
+    echo "Unknown sub-command \"$1\"."
+    print_usage
+    exit 1
+  ;;
+esac
 
-# If ssl, the populate the passwords into ssl-server.xml before starting tomcat
-#
-# HTTPFS_SSL_KEYSTORE_PASS is a bit odd.
-# if undefined, then the if test will not enable ssl on its own
-# if "", set it to "password".
-# if custom, use provided password
-#
-if [[ -f "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml.conf" ]]; then
-  if [[ -n "${HTTPFS_SSL_KEYSTORE_PASS+x}" ]] || [[ -n "${HTTPFS_SSL_TRUSTSTORE_PASS}" ]]; then
-    export HTTPFS_SSL_KEYSTORE_PASS=${HTTPFS_SSL_KEYSTORE_PASS:-password}
-    HTTPFS_SSL_KEYSTORE_PASS_ESCAPED=$(hadoop_xml_escape \
-      "$(hadoop_sed_escape "$HTTPFS_SSL_KEYSTORE_PASS")")
-    HTTPFS_SSL_TRUSTSTORE_PASS_ESCAPED=$(hadoop_xml_escape \
-      "$(hadoop_sed_escape "$HTTPFS_SSL_TRUSTSTORE_PASS")")
-    sed -e 's/"_httpfs_ssl_keystore_pass_"/'"\"${HTTPFS_SSL_KEYSTORE_PASS_ESCAPED}\""'/g' \
-        -e 's/"_httpfs_ssl_truststore_pass_"/'"\"${HTTPFS_SSL_TRUSTSTORE_PASS_ESCAPED}\""'/g' \
-      "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml.conf" \
-      > "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml"
-    chmod 700 "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml" >/dev/null 2>&1
-  fi
+# Locate bin
+if [[ -n "${HADOOP_HOME}" ]]; then
+  bin="${HADOOP_HOME}/bin"
+else
+  sbin=$(cd -P -- "$(dirname -- "$0")" >/dev/null && pwd -P)
+  bin=$(cd -P -- "${sbin}/../bin" >/dev/null && pwd -P)
 fi
 
-hadoop_add_param CATALINA_OPTS -Dhttpfs.http.hostname "-Dhttpfs.http.hostname=${HTTPFS_HOST_NAME}"
-hadoop_add_param CATALINA_OPTS -Dhttpfs.ssl.enabled "-Dhttpfs.ssl.enabled=${HTTPFS_SSL_ENABLED}"
-
-exec "${HADOOP_CATALINA_HOME}/bin/catalina.sh" "$@"
+exec "${bin}/hdfs" "${args[@]}"

+ 0 - 16
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/WEB-INF/web.xml

@@ -1,16 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<web-app version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee">
-</web-app>

+ 0 - 67
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/logging.properties

@@ -1,67 +0,0 @@
-#
-#  All Rights Reserved.
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-handlers = 1catalina.org.apache.juli.FileHandler, 2localhost.org.apache.juli.FileHandler, 3manager.org.apache.juli.FileHandler, 4host-manager.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler
-
-.handlers = 1catalina.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler
-
-############################################################
-# Handler specific properties.
-# Describes specific configuration info for Handlers.
-############################################################
-
-1catalina.org.apache.juli.FileHandler.level = FINE
-1catalina.org.apache.juli.FileHandler.directory = ${httpfs.log.dir}
-1catalina.org.apache.juli.FileHandler.prefix = httpfs-catalina.
-
-2localhost.org.apache.juli.FileHandler.level = FINE
-2localhost.org.apache.juli.FileHandler.directory = ${httpfs.log.dir}
-2localhost.org.apache.juli.FileHandler.prefix = httpfs-localhost.
-
-3manager.org.apache.juli.FileHandler.level = FINE
-3manager.org.apache.juli.FileHandler.directory = ${httpfs.log.dir}
-3manager.org.apache.juli.FileHandler.prefix = httpfs-manager.
-
-4host-manager.org.apache.juli.FileHandler.level = FINE
-4host-manager.org.apache.juli.FileHandler.directory = ${httpfs.log.dir}
-4host-manager.org.apache.juli.FileHandler.prefix = httpfs-host-manager.
-
-java.util.logging.ConsoleHandler.level = FINE
-java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter
-
-
-############################################################
-# Facility specific properties.
-# Provides extra control for each logger.
-############################################################
-
-org.apache.catalina.core.ContainerBase.[Catalina].[localhost].level = INFO
-org.apache.catalina.core.ContainerBase.[Catalina].[localhost].handlers = 2localhost.org.apache.juli.FileHandler
-
-org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].level = INFO
-org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].handlers = 3manager.org.apache.juli.FileHandler
-
-org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].level = INFO
-org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].handlers = 4host-manager.org.apache.juli.FileHandler
-
-# For example, set the com.xyz.foo logger to only log SEVERE
-# messages:
-#org.apache.catalina.startup.ContextConfig.level = FINE
-#org.apache.catalina.startup.HostConfig.level = FINE
-#org.apache.catalina.session.ManagerBase.level = FINE
-#org.apache.catalina.core.AprLifecycleListener.level=FINE

+ 0 - 151
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml

@@ -1,151 +0,0 @@
-<?xml version='1.0' encoding='utf-8'?>
-<!--
-
-   All Rights Reserved.
-
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<!-- Note:  A "Server" is not itself a "Container", so you may not
-     define subcomponents such as "Valves" at this level.
-     Documentation at /docs/config/server.html
- -->
-<Server port="${httpfs.admin.port}" shutdown="SHUTDOWN">
-
-  <!--APR library loader. Documentation at /docs/apr.html -->
-  <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on"/>
-  <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
-  <Listener className="org.apache.catalina.core.JasperListener"/>
-  <!-- Prevent memory leaks due to use of particular java/javax APIs-->
-  <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener"/>
-  <!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html -->
-  <Listener className="org.apache.catalina.mbeans.ServerLifecycleListener"/>
-  <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener"/>
-
-  <!-- Global JNDI resources
-       Documentation at /docs/jndi-resources-howto.html
-  -->
-  <GlobalNamingResources>
-    <!-- Editable user database that can also be used by
-         UserDatabaseRealm to authenticate users
-    -->
-    <Resource name="UserDatabase" auth="Container"
-              type="org.apache.catalina.UserDatabase"
-              description="User database that can be updated and saved"
-              factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
-              pathname="conf/tomcat-users.xml"/>
-  </GlobalNamingResources>
-
-  <!-- A "Service" is a collection of one or more "Connectors" that share
-       a single "Container" Note:  A "Service" is not itself a "Container",
-       so you may not define subcomponents such as "Valves" at this level.
-       Documentation at /docs/config/service.html
-   -->
-  <Service name="Catalina">
-
-    <!--The connectors can use a shared executor, you can define one or more named thread pools-->
-    <!--
-    <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
-        maxThreads="150" minSpareThreads="4"/>
-    -->
-
-
-    <!-- A "Connector" represents an endpoint by which requests are received
-         and responses are returned. Documentation at :
-         Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)
-         Java AJP  Connector: /docs/config/ajp.html
-         APR (HTTP/AJP) Connector: /docs/apr.html
-         Define a non-SSL HTTP/1.1 Connector on port ${httpfs.http.port}
-    -->
-    <Connector port="${httpfs.http.port}" protocol="HTTP/1.1"
-               connectionTimeout="20000"
-               maxHttpHeaderSize="${httpfs.max.http.header.size}"
-               redirectPort="8443"/>
-    <!-- A "Connector" using the shared thread pool-->
-    <!--
-    <Connector executor="tomcatThreadPool"
-               port="${httpfs.http.port}" protocol="HTTP/1.1"
-               connectionTimeout="20000"
-               redirectPort="8443" />
-    -->
-    <!-- Define a SSL HTTP/1.1 Connector on port 8443
-         This connector uses the JSSE configuration, when using APR, the
-         connector should be using the OpenSSL style configuration
-         described in the APR documentation -->
-    <!--
-    <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
-               maxThreads="150" scheme="https" secure="true"
-               clientAuth="false" sslProtocol="TLS" />
-    -->
-
-    <!-- Define an AJP 1.3 Connector on port 8009 -->
-
-
-    <!-- An Engine represents the entry point (within Catalina) that processes
- every request.  The Engine implementation for Tomcat stand alone
- analyzes the HTTP headers included with the request, and passes them
- on to the appropriate Host (virtual host).
- Documentation at /docs/config/engine.html -->
-
-    <!-- You should set jvmRoute to support load-balancing via AJP ie :
-    <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
-    -->
-    <Engine name="Catalina" defaultHost="localhost">
-
-      <!--For clustering, please take a look at documentation at:
-          /docs/cluster-howto.html  (simple how to)
-          /docs/config/cluster.html (reference documentation) -->
-      <!--
-      <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
-      -->
-
-      <!-- The request dumper valve dumps useful debugging information about
-           the request and response data received and sent by Tomcat.
-           Documentation at: /docs/config/valve.html -->
-      <!--
-      <Valve className="org.apache.catalina.valves.RequestDumperValve"/>
-      -->
-
-      <!-- This Realm uses the UserDatabase configured in the global JNDI
-           resources under the key "UserDatabase".  Any edits
-           that are performed against this UserDatabase are immediately
-           available for use by the Realm.  -->
-      <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
-             resourceName="UserDatabase"/>
-
-      <!-- Define the default virtual host
-           Note: XML Schema validation will not work with Xerces 2.2.
-       -->
-      <Host name="localhost" appBase="webapps"
-            unpackWARs="true" autoDeploy="true"
-            xmlValidation="false" xmlNamespaceAware="false">
-
-        <!-- SingleSignOn valve, share authentication between web applications
-             Documentation at: /docs/config/valve.html -->
-        <!--
-        <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
-        -->
-
-        <!-- Access log processes all example.
-             Documentation at: /docs/config/valve.html -->
-        <!--
-        <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
-               prefix="localhost_access_log." suffix=".txt" pattern="common" resolveHosts="false"/>
-        -->
-
-      </Host>
-    </Engine>
-  </Service>
-</Server>

+ 0 - 136
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ssl-server.xml.conf

@@ -1,136 +0,0 @@
-<?xml version='1.0' encoding='utf-8'?>
-<!--
-
-   All Rights Reserved.
-
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<!-- Note:  A "Server" is not itself a "Container", so you may not
-     define subcomponents such as "Valves" at this level.
-     Documentation at /docs/config/server.html
- -->
-<Server port="${httpfs.admin.port}" shutdown="SHUTDOWN">
-
-  <!--APR library loader. Documentation at /docs/apr.html -->
-  <Listener className="org.apache.catalina.core.AprLifecycleListener"
-            SSLEngine="on"/>
-  <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
-  <Listener className="org.apache.catalina.core.JasperListener"/>
-  <!-- Prevent memory leaks due to use of particular java/javax APIs-->
-  <Listener
-    className="org.apache.catalina.core.JreMemoryLeakPreventionListener"/>
-  <!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html -->
-  <Listener className="org.apache.catalina.mbeans.ServerLifecycleListener"/>
-  <Listener
-    className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener"/>
-
-  <!-- Global JNDI resources
-       Documentation at /docs/jndi-resources-howto.html
-  -->
-  <GlobalNamingResources>
-    <!-- Editable user database that can also be used by
-         UserDatabaseRealm to authenticate users
-    -->
-    <Resource name="UserDatabase" auth="Container"
-              type="org.apache.catalina.UserDatabase"
-              description="User database that can be updated and saved"
-              factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
-              pathname="conf/tomcat-users.xml"/>
-  </GlobalNamingResources>
-
-  <!-- A "Service" is a collection of one or more "Connectors" that share
-       a single "Container" Note:  A "Service" is not itself a "Container",
-       so you may not define subcomponents such as "Valves" at this level.
-       Documentation at /docs/config/service.html
-   -->
-  <Service name="Catalina">
-
-    <!--The connectors can use a shared executor, you can define one or more named thread pools-->
-    <!--
-    <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
-        maxThreads="httpfs.max.threads" minSpareThreads="4"/>
-    -->
-
-    <!-- Define a SSL HTTP/1.1 Connector on port 8443
-         This connector uses the JSSE configuration, when using APR, the
-         connector should be using the OpenSSL style configuration
-         described in the APR documentation -->
-    <Connector port="${httpfs.http.port}" protocol="HTTP/1.1" SSLEnabled="true"
-               maxThreads="150" scheme="https" secure="true"
-               maxHttpHeaderSize="${httpfs.max.http.header.size}"
-               clientAuth="false" sslEnabledProtocols="TLSv1,TLSv1.1,TLSv1.2,SSLv2Hello"
-               keystoreFile="${httpfs.ssl.keystore.file}"
-               keystorePass="_httpfs_ssl_keystore_pass_"/>
-
-    <!-- Define an AJP 1.3 Connector on port 8009 -->
-
-
-    <!-- An Engine represents the entry point (within Catalina) that processes
- every request.  The Engine implementation for Tomcat stand alone
- analyzes the HTTP headers included with the request, and passes them
- on to the appropriate Host (virtual host).
- Documentation at /docs/config/engine.html -->
-
-    <!-- You should set jvmRoute to support load-balancing via AJP ie :
-    <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
-    -->
-    <Engine name="Catalina" defaultHost="localhost">
-
-      <!--For clustering, please take a look at documentation at:
-          /docs/cluster-howto.html  (simple how to)
-          /docs/config/cluster.html (reference documentation) -->
-      <!--
-      <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
-      -->
-
-      <!-- The request dumper valve dumps useful debugging information about
-           the request and response data received and sent by Tomcat.
-           Documentation at: /docs/config/valve.html -->
-      <!--
-      <Valve className="org.apache.catalina.valves.RequestDumperValve"/>
-      -->
-
-      <!-- This Realm uses the UserDatabase configured in the global JNDI
-           resources under the key "UserDatabase".  Any edits
-           that are performed against this UserDatabase are immediately
-           available for use by the Realm.  -->
-      <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
-             resourceName="UserDatabase"/>
-
-      <!-- Define the default virtual host
-           Note: XML Schema validation will not work with Xerces 2.2.
-       -->
-      <Host name="localhost" appBase="webapps"
-            unpackWARs="true" autoDeploy="true"
-            xmlValidation="false" xmlNamespaceAware="false">
-
-        <!-- SingleSignOn valve, share authentication between web applications
-             Documentation at: /docs/config/valve.html -->
-        <!--
-        <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
-        -->
-
-        <!-- Access log processes all example.
-             Documentation at: /docs/config/valve.html -->
-        <!--
-        <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
-               prefix="localhost_access_log." suffix=".txt" pattern="common" resolveHosts="false"/>
-        -->
-
-      </Host>
-    </Engine>
-  </Service>
-</Server>

+ 105 - 31
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm

@@ -55,11 +55,12 @@ You need to restart Hadoop for the proxyuser configuration to become active.
 Start/Stop HttpFS
 -----------------
 
-To start/stop HttpFS use HttpFS's sbin/httpfs.sh script. For example:
+To start/stop HttpFS, use `hdfs --daemon start|stop httpfs`. For example:
 
-    $ sbin/httpfs.sh start
+    hadoop-${project.version} $ hdfs --daemon start httpfs
 
-NOTE: Invoking the script without any parameters list all possible parameters (start, stop, run, etc.). The `httpfs.sh` script is a wrapper for Tomcat's `catalina.sh` script that sets the environment variables and Java System properties required to run HttpFS server.
+NOTE: The script `httpfs.sh` is deprecated. It is now just a wrapper around
+`hdfs httpfs`.
 
 Test HttpFS is working
 ----------------------
@@ -67,52 +68,63 @@ Test HttpFS is working
     $ curl -sS 'http://<HTTPFSHOSTNAME>:14000/webhdfs/v1?op=gethomedirectory&user.name=hdfs'
     {"Path":"\/user\/hdfs"}
 
-Embedded Tomcat Configuration
------------------------------
-
-To configure the embedded Tomcat go to the `tomcat/conf`.
-
-HttpFS preconfigures the HTTP and Admin ports in Tomcat's `server.xml` to 14000 and 14001.
-
-Tomcat logs are also preconfigured to go to HttpFS's `logs/` directory.
-
-HttpFS default value for the maxHttpHeaderSize parameter in Tomcat's `server.xml` is set to 65536 by default.
-
-The following environment variables (which can be set in HttpFS's `etc/hadoop/httpfs-env.sh` script) can be used to alter those values:
-
-* HTTPFS\_HTTP\_PORT
-
-* HTTPFS\_ADMIN\_PORT
-
-* HADOOP\_LOG\_DIR
-
-* HTTPFS\_MAX\_HTTP\_HEADER\_SIZE
-
 HttpFS Configuration
 --------------------
 
+HttpFS preconfigures the HTTP port to 14000.
+
 HttpFS supports the following [configuration properties](./httpfs-default.html) in the HttpFS's `etc/hadoop/httpfs-site.xml` configuration file.
 
 HttpFS over HTTPS (SSL)
 -----------------------
 
-To configure HttpFS to work over SSL edit the [httpfs-env.sh](#httpfs-env.sh) script in the configuration directory setting the [HTTPFS\_SSL\_ENABLED](#HTTPFS_SSL_ENABLED) to [true](#true).
+Enable SSL in `etc/hadoop/httpfs-site.xml`:
 
-In addition, the following 2 properties may be defined (shown with default values):
+```xml
+  <property>
+    <name>hadoop.httpfs.ssl.enabled</name>
+    <value>true</value>
+    <description>
+      Whether SSL is enabled. Default is false, i.e. disabled.
+    </description>
+  </property>
+```
+
+Configure `etc/hadoop/ssl-server.xml` with proper values, for example:
+
+```xml
+  <property>
+    <name>ssl.server.keystore.location</name>
+    <value>${user.home}/.keystore</value>
+    <description>Keystore to be used. Must be specified.
+    </description>
+  </property>
 
-* HTTPFS\_SSL\_KEYSTORE\_FILE=$HOME/.keystore
+  <property>
+    <name>ssl.server.keystore.password</name>
+    <value></value>
+    <description>Must be specified.</description>
+  </property>
 
-* HTTPFS\_SSL\_KEYSTORE\_PASS=password
+  <property>
+    <name>ssl.server.keystore.keypassword</name>
+    <value></value>
+    <description>Must be specified.</description>
+  </property>
+```
 
-In the HttpFS `tomcat/conf` directory, replace the `server.xml` file with the `ssl-server.xml` file.
+The SSL passwords can be secured by a credential provider. See
+[Credential Provider API](../../../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
 
 You need to create an SSL certificate for the HttpFS server. As the `httpfs` Unix user, using the Java `keytool` command to create the SSL certificate:
 
-    $ keytool -genkey -alias tomcat -keyalg RSA
+    $ keytool -genkey -alias jetty -keyalg RSA
 
 You will be asked a series of questions in an interactive prompt. It will create the keystore file, which will be named **.keystore** and located in the `httpfs` user home directory.
 
-The password you enter for "keystore password" must match the value of the `HTTPFS_SSL_KEYSTORE_PASS` environment variable set in the `httpfs-env.sh` script in the configuration directory.
+The password you enter for "keystore password" must match the value of the
+property `ssl.server.keystore.password` set in the `ssl-server.xml` in the
+configuration directory.
 
 The answer to "What is your first and last name?" (i.e. "CN") must be the hostname of the machine where the HttpFS Server will be running.
 
@@ -121,3 +133,65 @@ Start HttpFS. It should work over HTTPS.
 Using the Hadoop `FileSystem` API or the Hadoop FS shell, use the `swebhdfs://` scheme. Make sure the JVM is picking up the truststore containing the public key of the SSL certificate if using a self-signed certificate.
 
 NOTE: Some old SSL clients may use weak ciphers that are not supported by the HttpFS server. It is recommended to upgrade the SSL client.
+
+Deprecated Environment Variables
+--------------------------------
+
+The following environment variables are deprecated. Set the corresponding
+configuration properties instead.
+
+Environment Variable        | Configuration Property       | Configuration File
+----------------------------|------------------------------|--------------------
+HTTPFS_TEMP                 | hadoop.http.temp.dir         | httpfs-site.xml
+HTTPFS_HTTP_PORT            | hadoop.httpfs.http.port      | httpfs-site.xml
+HTTPFS_MAX_HTTP_HEADER_SIZE | hadoop.http.max.request.header.size and hadoop.http.max.response.header.size | httpfs-site.xml
+HTTPFS_MAX_THREADS          | hadoop.http.max.threads      | httpfs-site.xml
+HTTPFS_SSL_ENABLED          | hadoop.httpfs.ssl.enabled    | httpfs-site.xml
+HTTPFS_SSL_KEYSTORE_FILE    | ssl.server.keystore.location | ssl-server.xml
+HTTPFS_SSL_KEYSTORE_PASS    | ssl.server.keystore.password | ssl-server.xml
+
+HTTP Default Services
+---------------------
+
+Name               | Description
+-------------------|------------------------------------
+/conf              | Display configuration properties
+/jmx               | Java JMX management interface
+/logLevel          | Get or set log level per class
+/logs              | Display log files
+/stacks            | Display JVM stacks
+/static/index.html | The static home page
+
+To control access to the `/conf`, `/jmx`, `/logLevel`, `/logs`,
+and `/stacks` servlets, configure the following properties in `httpfs-site.xml`:
+
+```xml
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>true</value>
+    <description>Is service-level authorization enabled?</description>
+  </property>
+
+  <property>
+    <name>hadoop.security.instrumentation.requires.admin</name>
+    <value>true</value>
+    <description>
+      Indicates if administrator ACLs are required to access
+      instrumentation servlets (JMX, METRICS, CONF, STACKS).
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.httpfs.http.administrators</name>
+    <value></value>
+    <description>ACL for the admins. This configuration is used to control
+      who can access the default servlets for HttpFS server. The value
+      should be a comma separated list of users and groups. The user list
+      comes first and is separated by a space followed by the group list,
+      e.g. "user1,user2 group1,group2". Both users and groups are optional,
+      so "user1", " group1", "", "user1 group1", "user1,user2 group1,group2"
+      are all valid (note the leading space in " group1"). '*' grants access
+      to all users and groups, e.g. '*', '* ' and ' *' are all valid.
+    </description>
+  </property>
+```
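
The new documentation lists the default servlets (`/conf`, `/jmx`, `/logLevel`, `/logs`, `/stacks`) that the Jetty-based HttpFS server now exposes. A small client-side sketch of fetching one of them with plain `java.net` follows; it assumes an HttpFS instance on the default port 14000 whose admin ACL and security settings do not block the caller, and it is not part of the commit.

```java
// Illustrative client sketch: fetch the /conf servlet of a running HttpFS
// server. Assumes the default port 14000 and that the admin ACL / security
// settings described above do not block the caller.
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class HttpFSConfServletSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:14000/conf");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);   // dumps the server's configuration as XML
      }
    } finally {
      conn.disconnect();
    }
  }
}
```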

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md

@@ -32,7 +32,7 @@ How Does HttpFS Works?
 
 HttpFS is a separate service from Hadoop NameNode.
 
-HttpFS itself is Java web-application and it runs using a preconfigured Tomcat bundled with HttpFS binary distribution.
+HttpFS itself is a Java Jetty web application.
 
 HttpFS HTTP web-service API calls are HTTP REST calls that map to a HDFS file system operation. For example, using the `curl` Unix command:
 

+ 106 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServer.java

@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.server;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStreamReader;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.text.MessageFormat;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.HadoopUsersConfTestHelper;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+/**
+ * Test {@link HttpFSServerWebServer}.
+ */
+public class TestHttpFSServerWebServer {
+
+  @Rule
+  public Timeout timeout = new Timeout(30000);
+  private HttpFSServerWebServer webServer;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    File homeDir = GenericTestUtils.getTestDir();
+    File confDir = new File(homeDir, "etc/hadoop");
+    File logsDir = new File(homeDir, "logs");
+    File tempDir = new File(homeDir, "temp");
+    confDir.mkdirs();
+    logsDir.mkdirs();
+    tempDir.mkdirs();
+    System.setProperty("hadoop.home.dir", homeDir.getAbsolutePath());
+    System.setProperty("hadoop.log.dir", logsDir.getAbsolutePath());
+    System.setProperty("httpfs.home.dir", homeDir.getAbsolutePath());
+    System.setProperty("httpfs.log.dir", logsDir.getAbsolutePath());
+    System.setProperty("httpfs.config.dir", confDir.getAbsolutePath());
+    new File(confDir, "httpfs-signature.secret").createNewFile();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(HttpFSServerWebServer.HTTP_HOST_KEY, "localhost");
+    conf.setInt(HttpFSServerWebServer.HTTP_PORT_KEY, 0);
+    Configuration sslConf = new Configuration();
+    webServer = new HttpFSServerWebServer(conf, sslConf);
+  }
+
+  @Test
+  public void testStartStop() throws Exception {
+    webServer.start();
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    URL url = new URL(webServer.getUrl(), MessageFormat.format(
+        "/webhdfs/v1/?user.name={0}&op=liststatus", user));
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    BufferedReader reader = new BufferedReader(
+        new InputStreamReader(conn.getInputStream()));
+    reader.readLine();
+    reader.close();
+    webServer.stop();
+  }
+
+  @Test
+  public void testJustStop() throws Exception {
+    webServer.stop();
+  }
+
+  @Test
+  public void testDoubleStop() throws Exception {
+    webServer.start();
+    webServer.stop();
+    webServer.stop();
+  }
+
+  @Test
+  public void testDoubleStart() throws Exception {
+    webServer.start();
+    webServer.start();
+    webServer.stop();
+  }
+
+}

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java

@@ -54,9 +54,11 @@ public class PrivilegedNfsGatewayStarter implements Daemon {
     }
 
     try {
-      registrationSocket = new DatagramSocket(
-                    new InetSocketAddress("localhost", clientPort));
+      InetSocketAddress socketAddress =
+                new InetSocketAddress("localhost", clientPort);
+      registrationSocket = new DatagramSocket(null);
       registrationSocket.setReuseAddress(true);
+      registrationSocket.bind(socketAddress);
     } catch (SocketException e) {
       LOG.error("Init failed for port=" + clientPort, e);
       throw e;
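
The change above splits socket creation from binding so that `SO_REUSEADDR` is set before `bind()`; the previous code set it on an already-bound socket, where it has no effect. A standalone sketch of the same pattern with plain `java.net` (the port number is hypothetical):

```java
// Standalone sketch of the bind pattern used above: create an unbound socket,
// set SO_REUSEADDR, then bind. Setting the option after bind() has no effect.
import java.net.DatagramSocket;
import java.net.InetSocketAddress;
import java.net.SocketException;

public class ReuseAddressBindSketch {
  public static void main(String[] args) throws SocketException {
    int port = 4242;                                    // hypothetical port
    DatagramSocket socket = new DatagramSocket(null);   // unbound socket
    socket.setReuseAddress(true);                       // must precede bind()
    socket.bind(new InetSocketAddress("localhost", port));
    System.out.println("bound with SO_REUSEADDR=" + socket.getReuseAddress());
    socket.close();
  }
}
```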

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs

@@ -41,7 +41,7 @@ function hadoop_usage
   hadoop_add_subcommand "dfsadmin" "run a DFS admin client"
   hadoop_add_subcommand "diskbalancer" "Distributes data evenly among disks on a given node"
   hadoop_add_subcommand "envvars" "display computed Hadoop environment variables"
-  hadoop_add_subcommand "erasurecode" "run a HDFS ErasureCoding CLI"
+  hadoop_add_subcommand "ec" "run a HDFS ErasureCoding CLI"
   hadoop_add_subcommand "fetchdt" "fetch a delegation token from the NameNode"
   hadoop_add_subcommand "fsck" "run a DFS filesystem checking utility"
   hadoop_add_subcommand "getconf" "get config values from configuration"
@@ -129,7 +129,7 @@ function hdfscmd_case
       echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
       exit 0
     ;;
-    erasurecode)
+    ec)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.erasurecode.ECCli
     ;;
     fetchdt)

+ 4 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -219,7 +219,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String  DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY =
       "dfs.namenode.reconstruction.pending.timeout-sec";
-  public static final int     DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
+  public static final int
+      DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT = 300;
 
   public static final String  DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY =
       "dfs.namenode.maintenance.replication.min";
@@ -640,6 +641,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long    DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT = 600L;
   public static final String  DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY = "dfs.block.access.token.lifetime";
   public static final long    DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT = 600L;
+  public static final String  DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE = "dfs.block.access.token.protobuf.enable";
+  public static final boolean DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE_DEFAULT = false;
 
   public static final String DFS_BLOCK_REPLICATOR_CLASSNAME_KEY = "dfs.block.replicator.classname";
   public static final Class<BlockPlacementPolicyDefault> DFS_BLOCK_REPLICATOR_CLASSNAME_DEFAULT = BlockPlacementPolicyDefault.class;
@@ -1294,14 +1297,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE
       = HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_DEFAULT;
 
-
-  @Deprecated
-  public static final String  DFS_CLIENT_WRITE_PACKET_SIZE_KEY =
-      HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
-  @Deprecated
-  public static final int     DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT =
-      HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-
   @Deprecated
   public static final String  DFS_CLIENT_SOCKET_TIMEOUT_KEY =
       HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
@@ -1395,13 +1390,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT;
 
   @Deprecated
-  public static final String  DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
-      HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY;
-
-  @Deprecated
-  public static final long    DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT =
-      HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT;
-  @Deprecated
   public static final String  DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS =
       HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS;
   @Deprecated

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java

@@ -180,7 +180,14 @@ class JNStorage extends Storage {
   }
 
   void format(NamespaceInfo nsInfo) throws IOException {
+    unlockAll();
+    try {
+      sd.analyzeStorage(StartupOption.FORMAT, this, true);
+    } finally {
+      sd.unlock();
+    }
     setStorageInfo(nsInfo);
+
     LOG.info("Formatting journal " + sd + " with nsid: " + getNamespaceID());
     // Unlock the directory before formatting, because we will
     // re-analyze it after format(). The analyzeStorage() call

+ 12 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java

@@ -75,6 +75,7 @@ public class BlockTokenSecretManager extends
 
   private final int intRange;
   private final int nnRangeStart;
+  private final boolean useProto;
 
   private final SecureRandom nonceGenerator = new SecureRandom();
 
@@ -83,11 +84,13 @@ public class BlockTokenSecretManager extends
    *
    * @param keyUpdateInterval how often a new key will be generated
    * @param tokenLifetime how long an individual token is valid
+   * @param useProto should we use new protobuf style tokens
    */
   public BlockTokenSecretManager(long keyUpdateInterval,
-      long tokenLifetime, String blockPoolId, String encryptionAlgorithm) {
+      long tokenLifetime, String blockPoolId, String encryptionAlgorithm,
+      boolean useProto) {
     this(false, keyUpdateInterval, tokenLifetime, blockPoolId,
-        encryptionAlgorithm, 0, 1);
+        encryptionAlgorithm, 0, 1, useProto);
   }
 
   /**
@@ -102,8 +105,9 @@ public class BlockTokenSecretManager extends
    */
   public BlockTokenSecretManager(long keyUpdateInterval,
       long tokenLifetime, int nnIndex, int numNNs,  String blockPoolId,
-      String encryptionAlgorithm) {
-    this(true, keyUpdateInterval, tokenLifetime, blockPoolId, encryptionAlgorithm, nnIndex, numNNs);
+      String encryptionAlgorithm, boolean useProto) {
+    this(true, keyUpdateInterval, tokenLifetime, blockPoolId,
+        encryptionAlgorithm, nnIndex, numNNs, useProto);
     Preconditions.checkArgument(nnIndex >= 0);
     Preconditions.checkArgument(numNNs > 0);
     setSerialNo(new SecureRandom().nextInt());
@@ -111,7 +115,8 @@ public class BlockTokenSecretManager extends
   }
 
   private BlockTokenSecretManager(boolean isMaster, long keyUpdateInterval,
-      long tokenLifetime, String blockPoolId, String encryptionAlgorithm, int nnIndex, int numNNs) {
+      long tokenLifetime, String blockPoolId, String encryptionAlgorithm,
+      int nnIndex, int numNNs, boolean useProto) {
     this.intRange = Integer.MAX_VALUE / numNNs;
     this.nnRangeStart = intRange * nnIndex;
     this.isMaster = isMaster;
@@ -120,6 +125,7 @@ public class BlockTokenSecretManager extends
     this.allKeys = new HashMap<Integer, BlockKey>();
     this.blockPoolId = blockPoolId;
     this.encryptionAlgorithm = encryptionAlgorithm;
+    this.useProto = useProto;
     generateKeys();
   }
 
@@ -246,7 +252,7 @@ public class BlockTokenSecretManager extends
   public Token<BlockTokenIdentifier> generateToken(String userId,
       ExtendedBlock block, EnumSet<BlockTokenIdentifier.AccessMode> modes) throws IOException {
     BlockTokenIdentifier id = new BlockTokenIdentifier(userId, block
-        .getBlockPoolId(), block.getBlockId(), modes);
+        .getBlockPoolId(), block.getBlockId(), modes, useProto);
     return new Token<BlockTokenIdentifier>(id, this);
   }
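
Both public constructors now take a `useProto` flag that selects the new protobuf token encoding. Below is a condensed sketch of how a caller wires that flag from configuration, mirroring the `KeyManager` and `BlockManager` changes further down; the update interval and lifetime values are illustrative only.

```java
// Condensed sketch mirroring the KeyManager/BlockManager wiring below:
// read the new protobuf flag and pass it into the non-HA constructor.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;

public class BlockTokenManagerWiringSketch {
  public static BlockTokenSecretManager create(Configuration conf,
      String blockPoolId, String encryptionAlgorithm) {
    boolean useProto = conf.getBoolean(
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE,
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE_DEFAULT);
    long updateInterval = 10 * 60 * 1000L;   // illustrative values only
    long tokenLifetime = 10 * 60 * 1000L;
    return new BlockTokenSecretManager(updateInterval, tokenLifetime,
        blockPoolId, encryptionAlgorithm, useProto);
  }
}
```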
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -114,7 +114,7 @@ import com.google.common.base.Preconditions;
  * defined in the default configuration file:
  * <pre>
  * <property>
- *   <name>dfs.balance.bandwidthPerSec</name>
+ *   <name>dfs.datanode.balance.bandwidthPerSec</name>
  *   <value>1048576</value>
  * <description>  Specifies the maximum bandwidth that each datanode 
  * can utilize for the balancing purpose in term of the number of bytes 

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java

@@ -88,7 +88,6 @@ import com.google.common.base.Preconditions;
 public class Dispatcher {
   static final Log LOG = LogFactory.getLog(Dispatcher.class);
 
-  private static final int MAX_NO_PENDING_MOVE_ITERATIONS = 5;
   /**
    * the period of time to delay the usage of a DataNode after hitting
    * errors when using it for migrating data
@@ -1108,6 +1107,8 @@ public class Dispatcher {
     }
     if (moveExecutor == null) {
       LOG.warn("No mover threads available: skip moving " + p);
+      targetDn.removePendingBlock(p);
+      p.proxySource.removePendingBlock(p);
       return;
     }
     moveExecutor.execute(new Runnable() {

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java

@@ -69,8 +69,12 @@ public class KeyManager implements Closeable, DataEncryptionKeyFactory {
           + ", token lifetime=" + StringUtils.formatTime(tokenLifetime));
       String encryptionAlgorithm = conf.get(
           DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
+      final boolean enableProtobuf = conf.getBoolean(
+          DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE,
+          DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE_DEFAULT);
       this.blockTokenSecretManager = new BlockTokenSecretManager(
-          updateInterval, tokenLifetime, blockpoolID, encryptionAlgorithm);
+          updateInterval, tokenLifetime, blockpoolID, encryptionAlgorithm,
+          enableProtobuf);
       this.blockTokenSecretManager.addKeys(keys);
 
       // sync block keys with NN more frequently than NN updates its block keys

+ 7 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -542,6 +542,9 @@ public class BlockManager implements BlockStatsMXBean {
     
     String nsId = DFSUtil.getNamenodeNameServiceId(conf);
     boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);
+    boolean shouldWriteProtobufToken = conf.getBoolean(
+        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE,
+        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE_DEFAULT);
 
     if (isHaEnabled) {
       // figure out which index we are of the nns
@@ -555,10 +558,12 @@ public class BlockManager implements BlockStatsMXBean {
         nnIndex++;
       }
       return new BlockTokenSecretManager(updateMin * 60 * 1000L,
-          lifetimeMin * 60 * 1000L, nnIndex, nnIds.size(), null, encryptionAlgorithm);
+          lifetimeMin * 60 * 1000L, nnIndex, nnIds.size(), null,
+          encryptionAlgorithm, shouldWriteProtobufToken);
     } else {
       return new BlockTokenSecretManager(updateMin*60*1000L,
-          lifetimeMin*60*1000L, 0, 1, null, encryptionAlgorithm);
+          lifetimeMin*60*1000L, 0, 1, null, encryptionAlgorithm,
+          shouldWriteProtobufToken);
     }
   }
 

Some files were not shown because too many files changed in this diff